code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
    """Tokenization test suite for RoCBertTokenizer (vocab + word-shape + word-pronunciation files).

    NOTE(review): this block was machine-mangled. Local variable names were replaced
    by throwaway annotated assignments (`UpperCAmelCase : <type> = ...` discards the
    real target) and most call arguments by the bare name `lowercase`, so references
    such as `vocab_tokens`, `tokenizer`, `tokenizer_r`, `tokens`, `expected_results`
    are never bound. The upstream transformers test must be restored before this
    suite can actually run; comments below describe the visible intent only.
    """

    SCREAMING_SNAKE_CASE__ = RoCBertTokenizer
    SCREAMING_SNAKE_CASE__ = None
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = filter_non_english

    def __lowerCAmelCase ( self : Dict ):
        '''Write a tiny vocab plus (empty) word-shape / word-pronunciation JSON maps into tmpdir.'''
        super().setUp()
        UpperCAmelCase : List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        UpperCAmelCase : Any = {}
        UpperCAmelCase : Union[str, Any] = {}
        for i, value in enumerate(lowercase ):
            UpperCAmelCase : Union[str, Any] = i
            UpperCAmelCase : str = i
        UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
        UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
            json.dump(lowercase , lowercase , ensure_ascii=lowercase )
        with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
            json.dump(lowercase , lowercase , ensure_ascii=lowercase )

    def __lowerCAmelCase ( self : Dict ):
        '''Tokenize Chinese text and check token / shape / pronunciation id conversion.'''
        UpperCAmelCase : Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCAmelCase : Optional[Any] = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(lowercase , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowercase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowercase ) , [5, 6, 2, 5, 7, 8] )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        '''Basic tokenizer splits mixed ASCII/CJK input on character boundaries.'''
        UpperCAmelCase : List[Any] = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def __lowerCAmelCase ( self : Dict ):
        '''Lower-casing basic tokenizer (flag values were mangled to `lowercase`).'''
        UpperCAmelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def __lowerCAmelCase ( self : Any ):
        '''Lower-casing with accents kept (per the expected "hällo" output).'''
        UpperCAmelCase : Any = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def __lowerCAmelCase ( self : Dict ):
        '''Lower-casing with accents stripped (per the expected "hallo" output).'''
        UpperCAmelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def __lowerCAmelCase ( self : Tuple ):
        '''Default strip_accents behaviour when only do_lower_case is set.'''
        UpperCAmelCase : Any = RoCBertBasicTokenizer(do_lower_case=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def __lowerCAmelCase ( self : Tuple ):
        '''No lower-casing: original case is preserved.'''
        UpperCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def __lowerCAmelCase ( self : str ):
        '''No lower-casing, accents kept.'''
        UpperCAmelCase : Optional[int] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def __lowerCAmelCase ( self : Dict ):
        '''No lower-casing, accents stripped.'''
        UpperCAmelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def __lowerCAmelCase ( self : Optional[int] ):
        '''never_split keeps "[UNK]" intact as a single token.'''
        UpperCAmelCase : int = RoCBertBasicTokenizer(do_lower_case=lowercase , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def __lowerCAmelCase ( self : str ):
        '''Greedy WordPiece splitting against a toy vocab, including the unknown path.'''
        UpperCAmelCase : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        UpperCAmelCase : List[Any] = {}
        for i, token in enumerate(lowercase ):
            UpperCAmelCase : Dict = i
        UpperCAmelCase : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=lowercase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def __lowerCAmelCase ( self : Dict ):
        '''_is_whitespace classification for common characters.'''
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        '''_is_control classification: controls yes, whitespace/letters no.'''
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def __lowerCAmelCase ( self : List[str] ):
        '''_is_punctuation classification.'''
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def __lowerCAmelCase ( self : Optional[int] ):
        '''Soft-hyphen-only strings tokenize to [] (tokenizers issue #340).'''
        UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(lowercase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    def __lowerCAmelCase ( self : Union[str, Any] ):
        '''offset_mapping matches the expected character spans with and without lower-casing.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                UpperCAmelCase : Any = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                UpperCAmelCase : Tuple = tokenizer_r.encode_plus(
                    lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , )
                UpperCAmelCase : str = tokenizer_r.do_lower_case if hasattr(lowercase , "do_lower_case" ) else False
                UpperCAmelCase : Tuple = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def __lowerCAmelCase ( self : List[str] ):
        '''Chinese-character handling: "##" prefixing with the flag toggled on and off.'''
        UpperCAmelCase : List[str] = ["的", "人", "有"]
        UpperCAmelCase : int = "".join(lowercase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase : Dict = True
                UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
                UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                UpperCAmelCase : Dict = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
                UpperCAmelCase : Union[str, Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
                UpperCAmelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowercase )
                UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(lowercase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowercase , lowercase )
                self.assertListEqual(lowercase , lowercase )
                UpperCAmelCase : Dict = False
                UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
                UpperCAmelCase : List[str] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
                UpperCAmelCase : Dict = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
                UpperCAmelCase : str = tokenizer_r.convert_ids_to_tokens(lowercase )
                UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(lowercase )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCAmelCase : List[str] = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowercase )
                ]
                self.assertListEqual(lowercase , lowercase )
                self.assertListEqual(lowercase , lowercase )

    @slow
    def __lowerCAmelCase ( self : Dict ):
        '''build_inputs_with_special_tokens wraps sequences with cls(1)/sep(2) ids.'''
        UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        UpperCAmelCase : Any = tokenizer.encode("你好" , add_special_tokens=lowercase )
        UpperCAmelCase : List[str] = tokenizer.encode("你是谁" , add_special_tokens=lowercase )
        UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowercase )
        UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def __lowerCAmelCase ( self : Tuple ):
        '''prepare_for_model output equals encode_plus for ids/shape/pronunciation inputs.'''
        UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                UpperCAmelCase : Tuple = "你好,你是谁"
                UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(lowercase )
                UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowercase )
                UpperCAmelCase : str = tokenizer.convert_tokens_to_shape_ids(lowercase )
                UpperCAmelCase : Dict = tokenizer.convert_tokens_to_pronunciation_ids(lowercase )
                UpperCAmelCase : Optional[Any] = tokenizer.prepare_for_model(
                    lowercase , lowercase , lowercase , add_special_tokens=lowercase )
                UpperCAmelCase : Optional[Any] = tokenizer.encode_plus(lowercase , add_special_tokens=lowercase )
                self.assertEqual(lowercase , lowercase )
| 595
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {"""vocab_file""": """vocab.txt"""}
snake_case_ : Optional[Any] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
snake_case_ : Optional[Any] = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def lowercase_ ( _lowercase: str ):
    """Load a plain-text vocabulary file into an ordered token -> index map.

    NOTE(review): the original was mangled — every local was assigned to the
    throwaway name `UpperCAmelCase`, leaving `vocab`/`tokens` unbound, and the
    annotation used `Union`, which this file never imports. Restored below.

    Args:
        _lowercase: path to a vocab file with one token per line.

    Returns:
        collections.OrderedDict mapping each token to its (0-based) line index,
        preserving file order.
    """
    vocab = collections.OrderedDict()
    with open(_lowercase , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        # strip only the trailing newline so tokens made of other whitespace survive
        vocab[token.rstrip("\n" )] = index
    return vocab
class snake_case__ ( lowerCAmelCase_ ):
    """Greedy longest-match-first WordPiece tokenizer.

    NOTE(review): restored from a mangled block — the original ``__init__``
    declared three parameters all named ``lowercase`` (a SyntaxError) and
    assigned them to the throwaway name ``UpperCAmelCase`` instead of instance
    attributes; locals in the tokenize method were likewise discarded.
    """

    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_00 ):
        '''Store the vocabulary, the unknown-token string and the per-word length cap.'''
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def __lowerCAmelCase ( self , token ):
        '''Split `token` into in-vocab substrings, greedy longest match from the left.'''
        chars = list(token )
        # overly long words are mapped to a single unk token
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            # shrink the window from the right until a vocab entry matches
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # no prefix matched: emit unk for one character and advance
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class snake_case__ ( lowerCAmelCase_ ):
    """CPM-Ant style tokenizer: jieba pre-segmentation + greedy WordPiece.

    NOTE(review): machine-mangled block. Assignments to `UpperCAmelCase` discard
    their real targets, the __init__ signature repeats the parameter name
    `lowercase` (a SyntaxError), every method shares the name `__lowerCAmelCase`
    (later defs shadow earlier ones), and `get_special_tokens_mask` passes the
    keyword `token_ids_a` twice. Restore upstream names before use; docstrings
    below describe the visible intent only.
    """

    SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
    SCREAMING_SNAKE_CASE__ = False

    def __init__( self : Tuple , lowercase : Union[str, Any] , lowercase : Tuple="<d>" , lowercase : Tuple="</d>" , lowercase : Optional[int]="<s>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<pad>" , lowercase : List[Any]="<unk>" , lowercase : Any="</n>" , lowercase : Dict="</_>" , lowercase : int="left" , **lowercase : int , ):
        '''Build encoder/decoder maps from the vocab file; requires the jieba backend.'''
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=lowercase , eod_token=lowercase , bos_token=lowercase , eos_token=lowercase , pad_token=lowercase , unk_token=lowercase , line_token=lowercase , space_token=lowercase , padding_side=lowercase , **lowercase , )
        UpperCAmelCase : int = bod_token
        UpperCAmelCase : int = eod_token
        UpperCAmelCase : Dict = load_vocab(lowercase )
        UpperCAmelCase : Optional[Any] = self.encoder[space_token]
        UpperCAmelCase : int = self.encoder[line_token]
        # space/newline ids are captured above, then the literal tokens are removed
        del self.encoder[space_token]
        del self.encoder[line_token]
        UpperCAmelCase : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase : x[1] ) )
        UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def __lowerCAmelCase ( self : Dict ):
        '''Id of the begin-of-document token.'''
        return self.encoder[self.bod_token]

    @property
    def __lowerCAmelCase ( self : Union[str, Any] ):
        '''Id of the end-of-document token.'''
        return self.encoder[self.eod_token]

    @property
    def __lowerCAmelCase ( self : Any ):
        '''Id of the newline token.'''
        return self.encoder["\n"]

    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        '''Size of the base vocabulary.'''
        return len(self.encoder )

    def __lowerCAmelCase ( self : Any ):
        '''Full vocabulary including added tokens.'''
        return dict(self.encoder , **self.added_tokens_encoder )

    def __lowerCAmelCase ( self : Dict , lowercase : List[Any] ):
        '''Segment text with jieba, then WordPiece-tokenize each segment.'''
        UpperCAmelCase : Optional[int] = []
        for x in jieba.cut(lowercase , cut_all=lowercase ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowercase ) )
        return output_tokens

    def __lowerCAmelCase ( self : Union[str, Any] , lowercase : Tuple , **lowercase : List[Any] ):
        '''Drop negative ids and pad/bos/eos ids before delegating to the parent decode.'''
        UpperCAmelCase : int = [i for i in token_ids if i >= 0]
        UpperCAmelCase : int = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(lowercase , **lowercase )

    def __lowerCAmelCase ( self : Dict , lowercase : int ):
        '''True when the token exists in the base vocabulary.'''
        return token in self.encoder

    def __lowerCAmelCase ( self : str , lowercase : List[str] ):
        '''Join sub-tokens back into a string with no separator.'''
        return "".join(lowercase )

    def __lowerCAmelCase ( self : str , lowercase : Any ):
        '''Token -> id, falling back to the unk token's id.'''
        return self.encoder.get(lowercase , self.encoder.get(self.unk_token ) )

    def __lowerCAmelCase ( self : Dict , lowercase : Dict ):
        '''Id -> token, falling back to the unk token.'''
        return self.decoder.get(lowercase , self.unk_token )

    def __lowerCAmelCase ( self : Optional[int] , lowercase : str , lowercase : Optional[str] = None ):
        '''Write the vocabulary (minus space/newline) back to disk, sorted by id.'''
        if os.path.isdir(lowercase ):
            UpperCAmelCase : Dict = os.path.join(
                lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            UpperCAmelCase : str = (filename_prefix + "-" if filename_prefix else "") + save_directory
        UpperCAmelCase : Optional[Any] = 0
        if " " in self.encoder:
            UpperCAmelCase : int = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            UpperCAmelCase : List[str] = self.encoder["\n"]
            del self.encoder["\n"]
        UpperCAmelCase : List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase : x[1] ) )
        with open(lowercase , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                # warn (once per gap) when ids are not consecutive, then resync
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    UpperCAmelCase : int = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)

    def __lowerCAmelCase ( self : Tuple , lowercase : List[int] , lowercase : List[int] = None ):
        '''Prepend the bos id before each sequence.'''
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def __lowerCAmelCase ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ):
        '''Mask with 1 for special tokens and 0 for sequence tokens.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
        if token_ids_a is not None:
            return [1] + ([0] * len(lowercase )) + [1] + ([0] * len(lowercase ))
        return [1] + ([0] * len(lowercase ))
| 595
| 1
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __lowercase ( *args , take_from=None , standard_warn=True , stacklevel=2 ):
    """Emit deprecation warnings and harvest deprecated arguments/attributes.

    NOTE(review): restored from a mangled block — every parameter was named
    `UpperCAmelCase__` (duplicate names, a SyntaxError) and every local was
    assigned to `__lowerCAmelCase`, leaving `deprecated_kwargs`, `values`,
    `warning` etc. unbound. Names below follow the visible usage.

    Args:
        *args: one or more ``(attribute, version_name, message)`` tuples.
        take_from: a kwargs dict (or object) to pop/read deprecated values from.
        standard_warn: prefix the per-attribute warning before ``message``.
        stacklevel: forwarded to ``warnings.warn`` so the caller is blamed.

    Returns:
        None, a single harvested value, or a tuple of harvested values.

    Raises:
        ValueError: if the library version already exceeds ``version_name``.
        TypeError: if unconsumed deprecated kwargs remain in ``take_from``.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # a single (attr, version, msg) triple may be passed unwrapped
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        # once the release has caught up, the deprecation shim must be deleted
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    # leftover keys mean the caller passed a kwarg nobody consumed
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 702
|
def __lowercase ( UpperCAmelCase__ = 10 , UpperCAmelCase__ = 1_000 , UpperCAmelCase__ = True ):
"""simple docstring"""
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
return min_val if option else max_val
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
return int((number_a + number_a) / 2 )
def __lowercase ( lower: int , higher: int , to_guess: int ):
    """Binary-search a hidden number in (lower, higher) and print each probe.

    NOTE(review): restored from a mangled block — parameters were triplicated
    `UpperCAmelCase__` (a SyntaxError) and locals were discarded. The probe
    midpoint comes from `get_avg`, which is NOT defined in this file (the
    averaging helper above was renamed `__lowercase` by the mangling) — the
    call is kept as written; TODO rewire once names are restored.

    Raises:
        AssertionError: on non-int arguments.
        ValueError: when lower > higher or to_guess lies outside the open range.
    """
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )

    def answer(number: int ) -> str:
        """Compare a probe against the target: 'high', 'low' or 'same'."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            # probe was below the target: raise the lower bound
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""" )
    print(f"""details : {last_numbers!s}""" )
def __lowercase ( ):
    """Read lower/high/guess values from stdin and run the guessing game.

    NOTE(review): restored locals from a mangled block. `guess_the_number` (and
    `main` in the guard below) are NOT defined in this file — the mangling
    renamed them `__lowercase`; calls are kept as written. TODO rewire.
    """
    lower = int(input('Enter lower value : ' ).strip() )
    higher = int(input('Enter high value : ' ).strip() )
    guess = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(lower , higher , guess )


if __name__ == "__main__":
    main()
| 102
| 0
|
"""simple docstring"""
from ... import PretrainedConfig
_snake_case = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class _a ( SCREAMING_SNAKE_CASE_ ):
    """NEZHA model configuration.

    NOTE(review): restored from a mangled block — the original __init__ declared
    every parameter as `SCREAMING_SNAKE_CASE__` (duplicate names, a SyntaxError)
    and assigned each value to the throwaway `lowerCamelCase__`. Parameter names
    below are recovered from the attribute-assignment order; the base class
    placeholder `SCREAMING_SNAKE_CASE_` and the archive-map constant remain
    unresolved in this file — TODO confirm against upstream.
    """

    a_ : List[Any] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    a_ : int = 'nezha'

    def __init__(
        self ,
        vocab_size=2_11_28 ,
        hidden_size=7_68 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=30_72 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=5_12 ,
        max_relative_position=64 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        classifier_dropout=0.1 ,
        pad_token_id=0 ,
        bos_token_id=2 ,
        eos_token_id=3 ,
        use_cache=True ,
        **kwargs ,
    ):
        # token-id kwargs are forwarded so the base config registers them
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 510
|
"""simple docstring"""
def snake_case ( _a: int )-> int:
    """Return the sum of the proper divisors of ``_a`` (divisors excluding itself).

    NOTE(review): the mangled original checked ``isinstance(_a, _a)`` — the type
    argument was lost; restored to ``int``.

    Raises:
        ValueError: if the input is not an int, or not positive.
    """
    if not isinstance(_a , int ):
        raise ValueError('Input must be an integer' )
    if _a <= 0:
        raise ValueError('Input must be positive' )
    # only values up to n // 2 can divide n (besides n itself, which is excluded)
    return sum(
        divisor for divisor in range(1 , _a // 2 + 1 ) if _a % divisor == 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 510
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _snake_case ( unittest.TestCase ):
    """Tests for transformers.utils.logging verbosity control and env overrides.

    NOTE(review): machine-mangled — `lowercase__ : <type> = ...` assignments
    discard their real targets and `SCREAMING_SNAKE_CASE_` stands in for the
    real arguments, so references like `logger`, `level_origin`, `msg`,
    `env_level_str` are unbound. Docstrings describe the visible intent only.
    """

    def lowercase__ ( self):
        '''set_verbosity_* keeps get_verbosity in sync with the root logger's effective level.'''
        lowercase__ : Optional[Any] = logging.get_logger()
        # the current default level is logging.WARNING
        lowercase__ : Any = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(SCREAMING_SNAKE_CASE_)

    def lowercase__ ( self):
        '''Warnings are captured or suppressed as verbosity moves across WARNING.'''
        lowercase__ : Union[str, Any] = logging.get_verbosity()
        lowercase__ : Optional[Any] = logging.get_logger("""transformers.models.bart.tokenization_bart""")
        lowercase__ : str = """Testing 1, 2, 3"""
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(SCREAMING_SNAKE_CASE_) as cl:
                logger.warning(SCREAMING_SNAKE_CASE_)
            self.assertEqual(cl.out , msg + """\n""")
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(SCREAMING_SNAKE_CASE_) as cl:
            logger.warning(SCREAMING_SNAKE_CASE_)
        self.assertEqual(cl.out , """""")
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(SCREAMING_SNAKE_CASE_) as cl:
            logger.warning(SCREAMING_SNAKE_CASE_)
        self.assertEqual(cl.out , msg + """\n""")
        # restore to the original level
        logging.set_verbosity(SCREAMING_SNAKE_CASE_)

    @mockenv(TRANSFORMERS_VERBOSITY="""error""")
    def lowercase__ ( self):
        '''TRANSFORMERS_VERBOSITY env var sets the internal verbosity.'''
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        lowercase__ : int = logging.get_logger("""transformers.models.bart.tokenization_bart""")
        lowercase__ : Optional[int] = os.getenv("""TRANSFORMERS_VERBOSITY""" , SCREAMING_SNAKE_CASE_)
        lowercase__ : Any = logging.log_levels[env_level_str]
        lowercase__ : Dict = logging.get_verbosity()
        self.assertEqual(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
        # restore to the original level
        lowercase__ : int = """"""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="""super-error""")
    def lowercase__ ( self):
        '''An unknown TRANSFORMERS_VERBOSITY value is reported, not applied.'''
        transformers.utils.logging._reset_library_root_logger()
        lowercase__ : str = logging.logging.getLogger()
        with CaptureLogger(SCREAMING_SNAKE_CASE_) as cl:
            # this action activates the env var
            logging.get_logger("""transformers.models.bart.tokenization_bart""")
        self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out)
        # no need to restore as nothing was changed

    def lowercase__ ( self):
        '''warning_advice is silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS.'''
        transformers.utils.logging._reset_library_root_logger()
        lowercase__ : Optional[Any] = logging.get_logger("""transformers.models.bart.tokenization_bart""")
        lowercase__ : List[Any] = """Testing 1, 2, 3"""
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1"""):
            # nothing should be logged as env var disables this method
            with CaptureLogger(SCREAMING_SNAKE_CASE_) as cl:
                logger.warning_advice(SCREAMING_SNAKE_CASE_)
            self.assertEqual(cl.out , """""")
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""""""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(SCREAMING_SNAKE_CASE_) as cl:
                logger.warning_advice(SCREAMING_SNAKE_CASE_)
            self.assertEqual(cl.out , msg + """\n""")
def UpperCamelCase ( ) -> Any:
    '''Smoke-test that disable/enable_progress_bar toggle the hub's global progress-bar flag.'''
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 708
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = data
lowercase__ : Node | None = None
class _snake_case :
def __init__( self):
'''simple docstring'''
lowercase__ : List[Any] = None
lowercase__ : int = None
def __iter__( self):
'''simple docstring'''
lowercase__ : List[str] = self.head
while self.head:
yield node.data
lowercase__ : str = node.next
if node == self.head:
break
def __len__( self):
'''simple docstring'''
return sum(1 for _ in self)
def __repr__( self):
'''simple docstring'''
return "->".join(str(SCREAMING_SNAKE_CASE_) for item in iter(self))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.insert_nth(len(self) , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.insert_nth(0 , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if index < 0 or index > len(self):
raise IndexError("""list index out of range.""")
lowercase__ : Optional[int] = Node(SCREAMING_SNAKE_CASE_)
if self.head is None:
lowercase__ : Union[str, Any] = new_node # first node points itself
lowercase__ : Dict = new_node
elif index == 0: # insert at head
lowercase__ : int = self.head
lowercase__ : int = new_node
else:
lowercase__ : List[str] = self.head
for _ in range(index - 1):
lowercase__ : Union[str, Any] = temp.next
lowercase__ : int = temp.next
lowercase__ : Optional[Any] = new_node
if index == len(self) - 1: # insert at tail
lowercase__ : Union[str, Any] = new_node
def lowercase__ ( self):
'''simple docstring'''
return self.delete_nth(0)
def lowercase__ ( self):
'''simple docstring'''
return self.delete_nth(len(self) - 1)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ = 0):
'''simple docstring'''
if not 0 <= index < len(self):
raise IndexError("""list index out of range.""")
lowercase__ : str = self.head
if self.head == self.tail: # just one node
lowercase__ : List[Any] = None
elif index == 0: # delete head node
lowercase__ : str = self.tail.next.next
lowercase__ : List[Any] = self.head.next
else:
lowercase__ : Optional[int] = self.head
for _ in range(index - 1):
lowercase__ : List[Any] = temp.next
lowercase__ : Optional[Any] = temp.next
lowercase__ : Dict = temp.next.next
if index == len(self) - 1: # delete at tail
lowercase__ : Tuple = temp
return delete_node.data
def lowercase__ ( self):
    """Return True when the circular list holds no nodes."""
    return not len(self)
def UpperCamelCase ( ) -> None:
    """Smoke-test CircularLinkedList: emptiness, error paths, inserts, deletes.

    BUG FIX: the list variable was bound under one name and read under
    others (`lowercase__` / `lowercase_` / `circular_linked_list`), and the
    expected strings joined `str(<the list>)` instead of `str(i)`.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True

    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 495
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester ( unittest.TestCase ):
    """Builds ChineseCLIP image-processor kwargs and random test images.

    BUG FIX: this class is instantiated as ``ChineseCLIPImageProcessingTester``
    and its methods are called as ``prepare_image_processor_dict`` /
    ``prepare_inputs`` elsewhere in this file, but the names were mangled;
    ``__init__`` had duplicate parameter names (a SyntaxError) and every
    attribute was assigned to a throwaway local instead of ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the processor configuration as a plain dict."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images, numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on 3-channel inputs.

    BUG FIX: attribute/method names were mangled to placeholders that unittest
    and the saving mixin cannot discover (``image_processing_class``, ``setUp``,
    ``image_processor_dict``, ``test_*``); locals were bound to a throwaway
    name and read under their real names; the mixin base was the undefined
    ``__a``.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests ChineseCLIPImageProcessor with 4-channel inputs (RGB conversion drops to 3).

    BUG FIX: same mangling as the 3-channel test class — names restored so
    unittest/the mixin can discover them and locals match their reads; the
    expected output channel count was assigned to a dead local in setUp.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # do_convert_rgb collapses the 4-channel input down to 3 channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 274
|
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects) -> list:
    """Release the given objects and empty the accelerator cache.

    BUG FIX: the original tested ``isinstance(x, x)``, assigned the list and
    the cleared slots to dead locals (so nothing was actually released), and
    three functions in this file shared one mangled name; restored to the
    working form (each slot is set to None so the caller can rebind).

    Returns:
        A list of ``None`` of the same length, for rebinding at the call site.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True when the exception is a known out-of-memory RuntimeError.

    BUG FIX: the original tested ``isinstance(__snake_case, __snake_case)``
    (always True for any object against its own type argument — nonsense) and
    never bound ``_statements``; it must check for RuntimeError and scan the
    known OOM messages.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries ``function`` with a halved batch size on OOM.

    ``function`` must take the batch size as its first argument; the decorator
    supplies it, starting at ``starting_batch_size`` and halving after every
    OOM failure until the call succeeds or the size reaches zero.

    BUG FIX: the original had duplicate parameter names (a SyntaxError),
    never bound ``batch_size``/``params``/``arg_str``, and called
    ``function`` without passing the batch size.

    Raises:
        RuntimeError: when no batch size down to 1 is executable.
        TypeError: when the wrapped function is called with an explicit
            batch-size first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 274
| 1
|
def remove_duplicates(key: str) -> str:
    """Return ``key`` with duplicate alphabetic characters removed.

    Spaces are always kept (even repeated ones); non-alphabetic characters
    other than spaces are dropped. BUG FIX: the accumulator was initialised
    under a mangled name while being read and extended as ``key_no_dups``,
    and four functions in this file shared the name ``_A``; the caller
    invokes this one as ``remove_duplicates``.
    """
    key_no_dups = ""
    for ch in key:
        # Note precedence: ch == " " OR (ch not seen yet AND ch is a letter).
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict:
    """Build a keyword-cipher substitution map: plaintext letter -> cipher letter.

    The deduplicated key fills the first positions of the cipher alphabet;
    the remaining letters are mapped, skipping letters already used by the
    key. BUG FIX: every local (``alphabet``, ``key``, ``offset``,
    ``cipher_alphabet``) was assigned to a dead mangled name, and the final
    dictionary write was lost; the caller invokes this as
    ``create_cipher_map``.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict) -> str:
    """Encipher ``message`` (upper-cased) through ``cipher_map``.

    Characters without a mapping (spaces, punctuation) pass through
    unchanged. BUG FIX: the lookup was ``cipher_map.get(__A, __A)`` — an
    undefined name — instead of ``(ch, ch)``; the dispatch table in main()
    names this function ``encipher``.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict) -> str:
    """Decipher ``message`` by inverting ``cipher_map``.

    Characters without a reverse mapping pass through unchanged. BUG FIX:
    the reversed map was bound to a dead local while being read as
    ``rev_cipher_map``, and the lookup used an undefined name; the dispatch
    table in main() names this function ``decipher``.
    """
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Interactive entry point: prompt for a message, keyword and mode, then run it.

    BUG FIX: every local was bound to a dead mangled name while being read
    as ``message``/``key``/``option``/``func``/``cipher_map``; the
    ``__main__`` guard calls this function as ``main``.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
# Run doctests, then drop into the interactive cipher prompt.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 714
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for CANINE (character-level tokenizer, no fixed vocab).

    BUG FIX: the mixin base was the undefined ``_lowerCamelCase``; the class
    attributes and every method carried mangled, colliding names that neither
    unittest nor TokenizerTesterMixin can discover; locals were bound to dead
    placeholders while being read under their real names; boolean keyword
    arguments were replaced with an undefined name. Restored per the
    upstream structure — method names marked NOTE(review) where inferred.
    """

    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        # cached_property keeps the downloaded checkpoint around for reuse.
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # NOTE(review): the original assigned 1024 to a dead local here;
        # presumably the tokenizer's private unicode vocab size — confirm.
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)
                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)
                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)
                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )
                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input_text
                encoded = tokenizer.encode(input_text, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

    # The common tests below are skipped: CANINE has no fixed vocabulary, so
    # vocab-based checks do not apply (names inferred from upstream — NOTE(review)).
    def test_add_tokens_tokenizer(self):
        pass

    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_tf_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
| 501
| 0
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Return True when ``module`` is the result of ``torch.compile``.

    BUG FIX: the ``_dynamo`` attribute check was performed on the argument
    instead of on ``torch``; the caller below invokes this function as
    ``is_compiled_module``.
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap ``model`` from DDP/DataParallel/DeepSpeed/compile wrappers.

    BUG FIX: every assignment targeted a dead local, so the unwrapping never
    happened; restored so compiled models are re-wrapped on return and the
    fp32 forward wrapper is stripped when requested.

    Args:
        model: the (possibly wrapped) model.
        keep_fp32_wrapper: when False, restore the original (pre-autocast)
            forward and undo any transformer-engine conversion.

    Returns:
        The innermost model (re-attached to its compiled wrapper if any).
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Walk back through functools.wraps layers until the original forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Block until all distributed processes reach this point.

    BUG FIX (naming): eight functions in this file shared the mangled name
    ``__A``, shadowing one another; restored descriptive names.
    """
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save ``obj`` to ``f``, TPU-aware and only on the main local process.

    BUG FIX (naming): restored from the shadowed mangled name ``__A``.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables, removing them on exit.

    BUG FIX: the environment write was assigned to a dead local, so the
    variables were never actually set.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj) -> str:
    """Return a readable name for ``obj``: qualname, name, or str() fallback.

    Instances without ``__qualname__``/``__name__`` fall back to their class.
    BUG FIX: the fallback reassignment of ``obj`` to its class was bound to a
    dead local, so instances were stringified instead of named.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination) -> dict:
    """Recursively merge ``source`` into ``destination`` (in place) and return it.

    Nested dicts are merged; scalar values from ``source`` overwrite
    ``destination``. BUG FIX: the isinstance check compared the wrong
    operands, and both the nested-node lookup and the scalar write were
    assigned to dead locals; the recursive call already names this function
    ``merge_dicts``.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port: int = None) -> bool:
    """Return True when a TCP connection to localhost:``port`` succeeds.

    Defaults to 29500 (the usual torch.distributed rendezvous port).
    BUG FIX: the default-port assignment was bound to a dead local, leaving
    ``port`` as None and crashing ``connect_ex``.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 52
|
'''simple docstring'''
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True when no already-colored neighbour uses ``color``.

    Args:
        neighbours: adjacency row (1 marks an edge to vertex i).
        colored_vertices: current color per vertex (-1 == uncolored).
        color: candidate color for the vertex owning this adjacency row.

    BUG FIX: the original had three parameters with the same name
    (a SyntaxError); the caller below names this function ``valid_coloring``.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def lowerCamelCase__ ( graph : list[list[int]] , max_colors : int , colored_vertices : list[int] , index : int ):
    """Backtracking step of graph m-coloring: try every color for vertex
    ``index`` and recurse; return True once all vertices are colored.

    BUG FIX: the original declared all four parameters with one name
    (SyntaxError) and called ``valid_coloring``/``util_color``, names that do
    not exist in this file. The validity check is inlined and the recursion
    now targets this function itself.
    """
    # Base Case: every vertex colored successfully.
    if index == len(graph):
        return True
    # Recursive Step
    for color in range(max_colors):
        # A color is usable if no neighbour of `index` already carries it.
        if not any(
            neighbour == 1 and colored_vertices[i] == color
            for i, neighbour in enumerate(graph[index])
        ):
            # Color current vertex
            colored_vertices[index] = color
            # Validate coloring of the remaining vertices.
            if lowerCamelCase__(graph , max_colors , colored_vertices , index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def lowerCamelCase__ ( graph : list[list[int]] , max_colors : int ):
    """Solve graph m-coloring: return a valid vertex coloring (list of color
    indices) using at most ``max_colors`` colors, or ``[]`` if none exists.

    BUG FIX: the original declared both parameters with one name (SyntaxError)
    and delegated to an undefined ``util_color``. The backtracking search is
    now a self-contained private helper.
    """
    colored_vertices = [-1] * len(graph)

    def _solve(index: int) -> bool:
        # One-line purpose: recursively color vertex `index` onward.
        if index == len(graph):
            return True
        for color in range(max_colors):
            if all(
                not (neighbour == 1 and colored_vertices[i] == color)
                for i, neighbour in enumerate(graph[index])
            ):
                colored_vertices[index] = color
                if _solve(index + 1):
                    return True
                colored_vertices[index] = -1  # backtrack
        return False

    return colored_vertices if _solve(0) else []
| 447
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the MobileNetV2 model family.
# BUG FIX: the original bound the structure dict and every conditional export
# list to the same throwaway name `_snake_case`, then passed an undefined
# `_import_structure` to `_LazyModule` (NameError). The TYPE_CHECKING imports
# also used degraded `...Va` names for `...V2` symbols. All restored.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 413
|
from math import pi
def lowerCamelCase_ ( radius : float , angle : float ) -> float:
    """Return the length of a circular arc of the given ``radius`` subtending
    ``angle`` degrees: circumference * (angle / 360).

    BUG FIX: the original declared both parameters as ``A`` (SyntaxError) and
    the body read undefined ``radius``/``angle``; the __main__ guard called an
    undefined ``arc_length``. Names restored from the body.
    """
    return 2 * pi * radius * (angle / 3_60)


if __name__ == "__main__":
    print(lowerCamelCase_(90, 10))
| 413
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the ViT model family.
# BUG FIX: the original bound the structure dict and every conditional export
# list to the same throwaway name `lowerCamelCase_`, then passed an undefined
# `_import_structure` to `_LazyModule` (NameError). Restored.
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vit'''] = [
        '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTForImageClassification''',
        '''ViTForMaskedImageModeling''',
        '''ViTModel''',
        '''ViTPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vit'''] = [
        '''TFViTForImageClassification''',
        '''TFViTModel''',
        '''TFViTPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_vit'''] = [
        '''FlaxViTForImageClassification''',
        '''FlaxViTModel''',
        '''FlaxViTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 330
|
from math import isqrt, loga
def lowerCamelCase__ ( max_number :int ):
    """Return all primes strictly below ``max_number`` (Sieve of Eratosthenes).

    BUG FIXES: the original read an undefined ``max_number`` (the parameter
    was named ``__A``), and the inner loop was ``range(i**2, __A, __A)`` —
    a step of ``max_number`` instead of ``i``, so almost no composites were
    crossed off. Both restored.
    """
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Cross off multiples of i starting at i*i, stepping by i.
            for j in range(i * i , max_number , i):
                is_prime[j] = False
    return [i for i in range(2 , max_number) if is_prime[i]]
def lowerCamelCase__ ( degree :int = 8_0_0_8_0_0 , base :int = 8_0_0_8_0_0 ):
    """Count hybrid integers p^q * q^p (p < q prime) not exceeding
    base**degree, by comparing q*log2(p) + p*log2(q) against degree*log2(base)
    with a two-pointer sweep over the primes (Project Euler 800).

    BUG FIXES: the original declared both parameters as ``__A``
    (SyntaxError), imported a nonexistent ``math.loga`` (should be ``log2``),
    and called an undefined ``calculate_prime_numbers``. The sieve is now a
    private helper and ``log2`` is imported locally.
    """
    from math import isqrt, log2

    def _primes_below(limit: int) -> list[int]:
        # Sieve of Eratosthenes over [2, limit).
        sieve = [True] * limit
        for i in range(2, isqrt(limit - 1) + 1):
            if sieve[i]:
                for j in range(i * i, limit, i):
                    sieve[j] = False
        return [i for i in range(2, limit) if sieve[i]]

    upper_bound = degree * log2(base)
    prime_numbers = _primes_below(int(upper_bound))

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink `right` until the pair (left, right) satisfies the bound;
        # every pair (left, j) with left < j <= right then also satisfies it.
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{lowerCamelCase__() = }")
| 268
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# BUG FIX: every constant below was assigned to the same rebound name
# (`_UpperCAmelCase`), so the canonical names the tokenizer class reads
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) were never defined.
# Restored per the upstream XLNet fast-tokenizer module layout.
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

SPIECE_UNDERLINE = """▁"""

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class snake_case__( UpperCAmelCase__ ):
    """Fast (tokenizers-backed) XLNet tokenizer.

    BUG FIXES: the original assigned all five class attributes to one
    colliding name, declared every ``__init__``/method parameter with a
    single repeated name (SyntaxError), gave all three methods the same name,
    and dropped the ``self.`` targets of the attribute assignments. Restored
    following the upstream XLNetTokenizerFast structure; runtime strings are
    preserved byte-for-byte.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],  # NOTE: mutable default kept — upstream API uses this literal
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        """XLNet sequence format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for the CLS token."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 714
|
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 619
| 0
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __UpperCamelCase ( lowerCamelCase__ ):
    """Unit tests for ``Dataset.from_list``.

    BUG FIXES: the original gave every method the same name (each shadowing
    the previous) and collapsed distinct locals into one rebound name, then
    read undefined names (``lowerCAmelCase``). Method and local names restored
    from the assertions' intent.
    """

    def _create_example_records(self):
        # Fixture: four records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['''col_1''', '''col_2'''] )
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i] )

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info, dset_from_dict.info )

    def test_uneven_records(self):  # checks what happens with missing columns
        records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(records)
        self.assertDictEqual(dset[0], {'''col_1''': 1} )
        self.assertDictEqual(dset[1], {'''col_1''': None} )  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features['''col_1'''], Sequence(Value('''int64''' ) ) )

    def test_create_empty(self):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset), 0 )
        self.assertListEqual(dset.column_names, [] )
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# BUG FIX: the logger and the archive map were both assigned to `a_`, so the
# logger was immediately clobbered. Restored distinct canonical names.
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for the Informer time-series transformer.

    BUG FIXES: the original declared every ``__init__`` parameter with the
    single name ``lowerCAmelCase`` (SyntaxError), stripped the ``self.``
    targets from every attribute assignment, and collapsed ``model_type`` /
    ``attribute_map`` into one colliding class attribute. Restored following
    the upstream InformerConfig; defaults match the degraded signature order.
    """

    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.0_5,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.0_2,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # Time-series-specific configuration.
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def _number_of_features(self) -> int:
        # Width of the extra per-timestep feature vector fed to the model.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 676
| 1
|
"""simple docstring"""
from PIL import Image
def __UpperCAmelCase ( img , level ) -> Image:
    """Return a copy of ``img`` with per-channel brightness shifted by ``level``.

    BUG FIXES: the original declared both parameters with the same name
    (SyntaxError), read undefined ``level``/``c``, and passed the parameter —
    not the ``brightness`` callable — to ``img.point``. Restored.
    """

    def brightness(c) -> float:
        # Shift each channel value by `level` (identity when level == 0).
        return 1_28 + level + (c - 1_28)

    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = __UpperCAmelCase(img, 100)
        brigt_img.save('image_data/lena_brightness.png', format='png')
| 122
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
    """Processor bundling a CLIP image processor and a CLIP tokenizer.

    BUG FIXES: the original collapsed the three class attributes into one
    colliding name, collapsed ``encoding``/``image_features`` locals so
    ``__call__`` returned an undefined name, and gave five methods/properties
    the same name. Restored per the upstream CLIPProcessor structure.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> Optional[int]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs) -> Optional[Any]:
        """Tokenize `text` and/or preprocess `images`; when both are given the
        pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs) -> List[Any]:
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs) -> int:
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self) -> str:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class(self) -> Union[str, Any]:
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self) -> Dict:
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
| 1
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def __snake_case ( t ) -> Optional[Any]:
    """Format a duration in seconds as ``h:mm:ss`` (or ``mm:ss`` below an hour).

    BUG FIX: the original stored ``int(...)`` in a dead local and then read an
    undefined ``t``; the h/m/s unpack also targeted a single repeated name.
    """
    t = int(t)
    h, m, s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def __snake_case ( value , total , prefix , label , width=3_0_0 ) -> Dict:
    """Render an HTML ``<progress>`` widget for notebook display.

    BUG FIX: the original declared all five parameters with the same name
    (SyntaxError); names restored from the f-string placeholders. Parameter
    order matches the call sites (value, total, prefix, label, width).
    """
    # docstyle-ignore
    return f'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def __snake_case ( items ) -> Optional[Any]:
    """Render ``items`` (first row = header) as an HTML table string.

    BUG FIX: the original bound the opening tag to a dead local and then
    appended to an undefined ``html_code``. Floats are shown to 6 decimals.
    """
    html_code = '''<table border="1" class="dataframe">\n'''
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''    <th>{i}</th>\n'''
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
            html_code += f'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class __snake_case :
    """Throttled HTML progress bar for Jupyter notebooks.

    BUG FIX: the degraded original stripped every ``self.`` target, so no
    attribute was ever set while the rest of the class read them
    (e.g. ``self.total``, ``self.warmup``). Restored per the matching
    upstream structure; runtime strings preserved byte-for-byte.
    """

    # After `warmup` immediate redraws, updates are rate-limited to one every
    # `update_every` seconds (estimated via average time per item).
    warmup = 5
    update_every = 0.2

    def __init__( self , total , prefix = None , leave = True , parent = None , width = 3_00 , ):
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update( self , value , force_update = False , comment = None ):
        """Record progress `value` and redraw if the throttle allows it."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and draw immediately.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twixe with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )

    def update_bar( self , value , comment=None ):
        """Rebuild the textual label ("[ n/total elapsed < remaining, it/s]")."""
        spaced_value = ''' ''' * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = f'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
        else:
            self.label = (
                f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
                f''' {format_time(self.predicted_remaining )}'''
            )
            self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
        self.display()

    def display( self ):
        """Render (or re-render) the HTML widget; child bars defer to parent."""
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )

    def close( self ):
        """Blank out the widget (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML('''''' ) )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """Progress bar that additionally renders a metrics table and can host one
    child bar (used for evaluation loops).

    BUG FIX: the degraded original stripped every ``self.`` target, so
    ``inner_table``/``child_bar``/``output`` were never set while the methods
    read them. Restored per the matching upstream structure.
    """

    def __init__( self , num_steps , column_names=None ):
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display( self ):
        """Re-render: bar HTML + optional metrics table + optional child bar."""
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )

    def write_line( self , values ):
        """Append one row to the metrics table (columns fixed after row 1)."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )

    def add_child( self , total , prefix=None , width=3_00 ):
        """Attach and return a child progress bar rendered below this one."""
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar

    def remove_child( self ):
        """Detach the child bar and redraw."""
        self.child_bar = None
        self.display()
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """TrainerCallback that drives the notebook progress bar / metrics table.

    BUG FIXES: the degraded original stripped every ``self.`` target and gave
    all callback methods a single colliding name. Restored per the matching
    upstream structure (on_train_begin, on_step_end, on_prediction_step,
    on_predict, on_log, on_evaluate, on_train_end); runtime strings preserved.
    """

    def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin( self , args , state , control , **kwargs ):
        """Create the tracker; first column is Epoch or Step per eval strategy."""
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )

    def on_step_end( self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        self._force_next_update = False

    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ):
        """Advance (or lazily create) the evaluation child bar."""
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )

    def on_predict( self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log( self , args , state , control , logs=None , **kwargs ):
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step sine we're not in epoch eval strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values )

    def on_evaluate( self , args , state , control , metrics=None , **kwargs ):
        """Write one metrics row and close the evaluation child bar."""
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch )
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(r'''\_loss$''' , '''''' , k )
            # Drop bookkeeping entries so only real metrics become columns.
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_runtime''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , None )
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''' )
                    name = ''' '''.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end( self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=True )
        self.training_tracker = None
| 100
|
def one_pence() -> int:
    """Base case: exactly one way to add a final 1p coin."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 10p."""
    return 0 if x < 0 else ten_pence(x - 1_0) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 2_0) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 5_0) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make `x` pence using coins of at most £1."""
    return 0 if x < 0 else one_pound(x - 1_0_0) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make `x` pence using coins of at most £2."""
    return 0 if x < 0 else two_pound(x - 2_0_0) + one_pound(x)


def solution(x: int = 2_0_0) -> int:
    """Project Euler 31: number of ways to make `x` pence from UK coins.

    BUG FIX: every function above was defined under the single name
    ``__snake_case`` (each shadowing the previous) while the bodies and the
    __main__ guard called ``one_pence`` ... ``solution`` — all undefined.
    The called names are restored as the definition names.
    """
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 100
| 1
|
from bisect import bisect
from itertools import accumulate
def UpperCamelCase_( vl , wt , w , n ):
    """Fractional knapsack: maximum value achievable with capacity ``w`` from
    items of values ``vl`` and weights ``wt`` (``n`` = number of items),
    allowing the last picked item to be taken fractionally.

    BUG FIXES: the original declared all four parameters with one name
    (SyntaxError), the sort key lambda read an undefined ``x``, ``reverse``
    was bound to a parameter instead of ``True``, and the body read undefined
    ``vl``/``wt``/``w``/``k``/``n``. Restored from the algorithm's structure.
    """
    # Sort items by value density (value / weight), best first.
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    # k = number of whole items that fit within capacity w.
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many rows of ``out`` have their argmax equal to ``labels``.

    Args:
        out: 2-D array of per-class scores, one row per example.
        labels: 1-D array of gold class indices.

    Returns:
        The number (not the fraction) of correctly classified examples.
    """
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label).

    Reads the RocStories CSV at ``dataset_path``: columns 1-4 hold the story
    sentences, columns 5 and 6 the two candidate endings, and the last column
    the 1-based index of the correct ending (converted here to 0-based).
    """
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process (story, continuation-1, continuation-2, label) tuples into model tensors.

    For each example builds the two candidate inputs
    ``[start] story [delimiter] continuation [clf]`` (each segment truncated to
    ``cap_length``) and packs them into fixed-size arrays of width ``input_len``.

    Returns:
        A list (one entry per dataset) of tuples
        ``(input_ids, mc_token_ids, lm_labels, mc_labels)`` of torch tensors:
        - input_ids:    (n, 2, input_len) token ids, zero-padded
        - mc_token_ids: (n, 2) index of the [clf] token in each candidate
        - lm_labels:    (n, 2, input_len) copies of input_ids, padding = -100
          (the ignore index for the LM loss)
        - mc_labels:    (n,) index of the correct continuation
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # The multiple-choice head reads the hidden state at the [clf] token.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    """Fine-tune and/or evaluate OpenAI GPT (double-heads) on the RocStories cloze task.

    Parses CLI args, loads tokenizer/model with the three special tokens,
    encodes the train/eval CSVs, trains with AdamW + linear warmup when
    ``--do_train``, then (optionally reloading the saved model) evaluates
    multiple-choice accuracy when ``--do_eval`` and writes eval_results.txt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed everything for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object (str, int, or nested lists thereof)."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Combined objective: weighted LM loss + multiple-choice loss.
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
| 382
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL.Image when the vision extras are installed; otherwise define a
# no-op stand-in so that references to the name do not fail at import time.
if is_vision_available():
    from PIL import Image
else:

    class __snake_case :
        """No-op placeholder used when vision dependencies are unavailable."""

        # NOTE(review): the obfuscation repeats the parameter name
        # ``UpperCamelCase__`` (invalid Python); the original was presumably
        # ``open(*args, **kwargs)`` — verify against the upstream test file.
        @staticmethod
        def SCREAMING_SNAKE_CASE_ ( *UpperCamelCase__ :Any , **UpperCamelCase__ :int ):
            pass
@is_pipeline_test
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
    """Tests for the "zero-shot-object-detection" pipeline.

    Fast checks run against a tiny random OwlViT checkpoint; @slow checks run
    the default full model and pin exact scores/boxes, including batched
    inputs, a score ``threshold`` and ``top_k`` truncation.
    """

    # Model mapping consumed by the shared pipeline-test harness.
    lowerCAmelCase_ : List[Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :str , UpperCamelCase__ :str , UpperCamelCase__ :Any ):
        """Build a tiny-random pipeline plus one example input for the harness.

        NOTE(review): obfuscation collapsed both locals to ``_a`` while the
        return statement still names ``object_detector, examples``.
        """
        _a = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        _a = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def SCREAMING_SNAKE_CASE_ ( self :List[str] , UpperCamelCase__ :List[str] , UpperCamelCase__ :Tuple ):
        """Run the pipeline at threshold 0 and check every detection has the expected keys."""
        _a = object_detector(examples[0] , threshold=0.0 )
        _a = len(UpperCamelCase__ )
        self.assertGreater(UpperCamelCase__ , 0 )
        self.assertEqual(
            UpperCamelCase__ , [
                {
                    "score": ANY(UpperCamelCase__ ),
                    "label": ANY(UpperCamelCase__ ),
                    "box": {"xmin": ANY(UpperCamelCase__ ), "ymin": ANY(UpperCamelCase__ ), "xmax": ANY(UpperCamelCase__ ), "ymax": ANY(UpperCamelCase__ )},
                }
                for i in range(UpperCamelCase__ )
            ] , )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def SCREAMING_SNAKE_CASE_ ( self :str ):
        pass

    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
        """Small-model test: pin exact detections of the tiny checkpoint, single and batched input."""
        _a = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        _a = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4 ) , [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ] , )

        _a = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4 ) , [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ] , )

    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
        """Large-model test: pin exact detections of the default pipeline model, single and batched."""
        _a = pipeline("zero-shot-object-detection" )
        _a = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4 ) , [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ] , )

        _a = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ] , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4 ) , [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ] , )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def SCREAMING_SNAKE_CASE_ ( self :int ):
        pass

    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
        """Check that ``threshold`` filters out low-scoring detections."""
        _a = 0.2
        _a = pipeline("zero-shot-object-detection" )
        _a = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=UpperCamelCase__ , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4 ) , [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ] , )

    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE_ ( self :int ):
        """Check that ``top_k`` truncates the result list."""
        _a = 2
        _a = pipeline("zero-shot-object-detection" )
        _a = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=UpperCamelCase__ , )
        self.assertEqual(
            nested_simplify(UpperCamelCase__ , decimals=4 ) , [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ] , )
| 388
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Repeatedly divides out the smallest factor found by trial division; the
    last factor recorded is the largest prime factor.

    >>> solution(13195)
    29

    Raises:
        TypeError: if ``n`` is not an int and cannot be cast to one.
        ValueError: if ``n`` is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance to the next factor of the remaining n.
        while n % i != 0:
            i += 1
        ans = i
        # Divide this factor out completely before moving on.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f'{solution() = }')
| 388
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
    """Builds tiny ConvNext configs/inputs and runs shape checks for the test classes below.

    NOTE(review): all parameters were renamed ``_UpperCamelCase`` by the
    obfuscation; the attribute assignments show the intended order
    (parent, batch_size, image_size, num_channels, num_stages, ...).
    """

    def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : str=13 , _UpperCamelCase : str=32 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : List[Any]=[10, 20, 30, 40] , _UpperCamelCase : int=[2, 2, 3, 2] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[str]=True , _UpperCamelCase : Dict=37 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[Any]=10 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : List[Any]=["stage2", "stage3", "stage4"] , _UpperCamelCase : Union[str, Any]=[2, 3, 4] , _UpperCamelCase : Optional[int]=None , ) ->Optional[int]:
        """Store the tiny-model hyper-parameters used by all checks."""
        _lowerCamelCase : Optional[int] = parent
        _lowerCamelCase : Union[str, Any] = batch_size
        _lowerCamelCase : Union[str, Any] = image_size
        _lowerCamelCase : Optional[Any] = num_channels
        _lowerCamelCase : Optional[Any] = num_stages
        _lowerCamelCase : int = hidden_sizes
        _lowerCamelCase : Tuple = depths
        _lowerCamelCase : List[str] = is_training
        _lowerCamelCase : Tuple = use_labels
        _lowerCamelCase : Tuple = intermediate_size
        _lowerCamelCase : List[Any] = hidden_act
        _lowerCamelCase : str = num_labels
        _lowerCamelCase : List[str] = initializer_range
        _lowerCamelCase : List[str] = out_features
        _lowerCamelCase : Optional[int] = out_indices
        _lowerCamelCase : List[Any] = scope

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        """Create random pixel values (and labels if enabled) plus a config."""
        _lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        _lowerCamelCase : Dict = None
        if self.use_labels:
            _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels)

        _lowerCamelCase : List[str] = self.get_config()

        return config, pixel_values, labels

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        """Build a ConvNextConfig from the stored hyper-parameters."""
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]) ->List[Any]:
        """Forward the base model and check the last hidden state shape."""
        _lowerCamelCase : Dict = ConvNextModel(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : str = model(_UpperCamelCase)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Any) ->int:
        """Forward the classification head and check the logits shape."""
        _lowerCamelCase : Optional[Any] = ConvNextForImageClassification(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : str = model(_UpperCamelCase , labels=_UpperCamelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def _SCREAMING_SNAKE_CASE ( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict) ->Tuple:
        """Check the backbone's feature maps and channels, with and without out_features."""
        _lowerCamelCase : Optional[int] = ConvNextBackbone(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Optional[Any] = model(_UpperCamelCase)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        _lowerCamelCase : Optional[Any] = None
        _lowerCamelCase : List[Any] = ConvNextBackbone(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : str = model(_UpperCamelCase)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
        """Adapt prepare_config_and_inputs to the common-test dict format."""
        _lowerCamelCase : int = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
        _lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common model/pipeline tests for ConvNext (model, classification head, backbone)."""

    _snake_case = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    _snake_case = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original class distinguished these flags
    # (fx_compatible, test_pruning, test_head_masking, ...); obfuscation
    # collapsed all of their names to ``_snake_case``.
    _snake_case = True
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
        """Create the model tester and a ConfigTester for ConvNextConfig."""
        _lowerCamelCase : List[Any] = ConvNextModelTester(self)
        _lowerCamelCase : List[str] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37)

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
        """Run the standard config serialization/initialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        """Intentionally empty (placeholder for common-properties check)."""
        return

    @unittest.skip(reason="""ConvNext does not use inputs_embeds""")
    def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
        pass

    @unittest.skip(reason="""ConvNext does not support input and output embeddings""")
    def _SCREAMING_SNAKE_CASE ( self : int) ->int:
        pass

    @unittest.skip(reason="""ConvNext does not use feedforward chunking""")
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
        pass

    def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
        """Check the forward signature starts with ``pixel_values``."""
        _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(_UpperCamelCase)
            _lowerCamelCase : Tuple = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Any = [*signature.parameters.keys()]

            _lowerCamelCase : Any = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
        """Delegate the base-model shape check to the tester."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        """Delegate the backbone check to the tester."""
        _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
        """Check hidden-state outputs: one per stage (+ embeddings), with expected spatial size."""
        def check_hidden_states_output(_UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : str):
            _lowerCamelCase : Union[str, Any] = model_class(_UpperCamelCase)
            model.to(_UpperCamelCase)
            model.eval()

            with torch.no_grad():
                _lowerCamelCase : Dict = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))

            _lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            _lowerCamelCase : Optional[Any] = self.model_tester.num_stages
            self.assertEqual(len(_UpperCamelCase) , expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        _lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Any = True

            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
        """Delegate the image-classification check to the tester."""
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase)

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : int = ConvNextModel.from_pretrained(_UpperCamelCase)
            self.assertIsNotNone(_UpperCamelCase)
def prepare_img():
    """Load the COCO cats-and-remotes fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Integration test: run the real convnext-tiny-224 checkpoint on a fixture image."""

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
        """Return the pretrained image processor (or None without vision extras)."""
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""") if is_vision_available() else None

    @slow
    def _SCREAMING_SNAKE_CASE ( self : int) ->Dict:
        """Check logits shape and the first three logit values against known constants."""
        _lowerCamelCase : List[str] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""").to(_UpperCamelCase)

        _lowerCamelCase : Optional[Any] = self.default_image_processor
        _lowerCamelCase : str = prepare_img()
        _lowerCamelCase : Optional[Any] = image_processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase)

        # forward pass
        with torch.no_grad():
            _lowerCamelCase : List[Any] = model(**_UpperCamelCase)

        # verify the logits
        _lowerCamelCase : str = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , _UpperCamelCase)

        _lowerCamelCase : Any = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1]).to(_UpperCamelCase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4))
@require_torch
class __snake_case(unittest.TestCase, __lowerCAmelCase):
    """Backbone-specific checks for ConvNext (the mixin base supplies the tests)."""

    # NOTE(review): the original three class attributes all shared one
    # scrambled name and overwrote each other; the conventional
    # backbone-tester attribute names are restored here — confirm upstream.
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        # Bug fix: the tester was bound to a local variable, so the mixin's
        # tests could never reach it; it must live on `self`, and the method
        # must be named setUp for unittest to call it.
        self.model_tester = ConvNextModelTester(self)
| 15
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __snake_case(Protocol):
    """Structural type for audio filters: anything exposing process(sample) -> float.

    Bug fix: the base class was an undefined placeholder; `Protocol` (imported
    above) is the only sensible base, and the method must be named `process`
    because the plotting helpers below call `filter_type.process(...)`.
    """

    def process(self, sample: float) -> float:
        """Default no-op implementation: always return silence (0.0)."""
        return 0.0
def A__(fft_results, samplerate):
    """Return (lowest, highest) dB bounds over the positive-frequency bins.

    Bug fix: both parameters had the same placeholder name (a SyntaxError);
    names recovered from how the body slices `fft_results[1 : samplerate // 2 - 1]`.
    Bounds are clamped so the plot always spans at least [-20, 20] dB.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def A__(filter_type, samplerate):
    """Plot the gain (dB) frequency response of `filter_type` at `samplerate`.

    Bug fixes: duplicate placeholder parameter names (SyntaxError); the impulse
    response fed `process()` the wrong argument; `np.logaa` restored to
    `np.log10` (dB = 20*log10(|FFT|)).
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def A__(filter_type, samplerate):
    """Plot the phase response (radians) of `filter_type` at `samplerate`.

    Bug fixes: duplicate placeholder parameter names (SyntaxError) and the
    impulse-response comprehension fed `process()` the wrong argument.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 15
| 1
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCamelCase(unittest.TestCase):
    """Builds small RobertaPreLayerNorm configs and dummy input batches for the Flax tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        # Bug fix: the original signature reused one placeholder name for every
        # argument (a SyntaxError); names recovered from the attribute
        # assignments below, defaults kept in the original order.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for one dummy batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the original value was an undefined placeholder;
            # encoder-only (False) is the conventional default here — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same as prepare_config_and_inputs, but returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: flips is_decoder and adds encoder hidden states/mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCamelCase(FlaxModelTesterMixin, unittest.TestCase):
    """Standard Flax model-tester suite for RobertaPreLayerNorm."""

    # NOTE(review): the original base class and both attribute names were
    # scrambled placeholders (the two attributes overwrote each other);
    # FlaxModelTesterMixin is imported above, and the conventional names are
    # restored here — confirm against upstream.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # Must be named setUp so unittest runs it before each mixin test.
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Every model class should load the pretrained PT checkpoint and run."""
        for model_class_name in self.all_model_classes:
            # NOTE(review): from_pt flag was an undefined placeholder; the
            # checkpoint is a PyTorch one, so True is restored — confirm.
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class _lowerCamelCase(unittest.TestCase):
    """Slow integration tests against the pretrained efficient_mlm_m0.40 checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        """Masked-LM head: check output shape and a known logits slice."""
        # NOTE(review): from_pt flag and the scrambled dtypes (jnp.intaa /
        # np.floataa) restored to True / int32 / float32 — confirm upstream.
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        """Base model: check a known hidden-state slice."""
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 64
|
'''simple docstring'''
import numpy as np
def __UpperCamelCase(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Power iteration: return (dominant eigenvalue, its eigenvector).

    Bug fix: every parameter shared one placeholder name (a SyntaxError) and
    the loop body referenced placeholders; names recovered from the assertions
    and the Rayleigh-quotient structure of the algorithm.

    Raises AssertionError for non-square matrices, mismatched vector size,
    mixed real/complex inputs, or a non-Hermitian complex matrix.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    """Compare power iteration against numpy.linalg.eigh on real and Hermitian matrices.

    Bug fixes: the function was renamed to `test_power_iteration` (the name the
    __main__ guard below actually calls); `np.complexaaa` restored to
    np.complex128; the scrambled local names and the undefined
    `power_iteration` reference reconstructed from the algorithm's structure.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T  # makes the matrix Hermitian
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation (power iteration is defined above under a
        # scrambled name in this file).
        eigen_value, eigen_vector = __UpperCamelCase(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 614
| 0
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a_(lowerCamelCase):
    """Logistic sigmoid: map any real value to (0, 1).

    Bug fix: the body referenced an undefined name `z` instead of the parameter.
    """
    return 1 / (1 + np.exp(-lowerCamelCase))
def a_(h, y):
    """Mean binary cross-entropy between predictions `h` and labels `y`.

    Bug fix: both parameters shared one placeholder name (a SyntaxError);
    names recovered from the body's use of `y` and `1 - h`.
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def a_(x, y, weights):
    """Log-likelihood of logistic-regression `weights` given features `x` and labels `y`.

    Bug fix: all three parameters shared one placeholder name (a SyntaxError);
    names recovered from the dot-product structure of the body.
    """
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def a_(alpha, x, y, max_iterations=70000):
    """Train logistic-regression weights by gradient descent.

    Bug fix: all four parameters shared one placeholder name (a SyntaxError);
    names recovered from the body (learning rate `alpha`, features `x`,
    labels `y`) and from the caller's `max_iterations=` keyword below.
    Returns the learned weight vector `theta`.
    """
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Bug fix: everything below referenced names created inside this guard
    # (iris, x, y, theta) but had lost its indentation and ran at module
    # level, raising NameError on import; the whole demo now lives under the
    # guard. Scrambled unpack targets restored from later references.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(grid_points):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(grid_points, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 632
|
"""simple docstring"""
import random
class snake_case :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : str ):
UpperCAmelCase__ = [ord(lowerCamelCase__ ) for i in text]
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for i in plain:
UpperCAmelCase__ = random.randint(1 ,300 )
UpperCAmelCase__ = (i + k) * k
cipher.append(lowerCamelCase__ )
key.append(lowerCamelCase__ )
return cipher, key
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : list[int] ,lowerCamelCase__ : list[int] ):
UpperCAmelCase__ = []
for i in range(len(lowerCamelCase__ ) ):
UpperCAmelCase__ = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(lowerCamelCase__ ) )
return "".join(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ , lowerCAmelCase__ : Dict = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 632
| 1
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : List[Any] ="""src/transformers"""
_lowercase : Optional[int] ="""docs/source/en"""
_lowercase : Dict ="""."""
def _SCREAMING_SNAKE_CASE(filename, start_prompt, end_prompt):
    """Return (text between the prompts, start_index, end_index, all lines) of `filename`.

    Bug fix: all three parameters shared one placeholder name (a SyntaxError);
    names recovered from the keyword call `filename=..., start_prompt=...,
    end_prompt=...` later in this file. Blank lines adjacent to the prompts
    are trimmed from the returned span.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Skip blank lines ("\n" alone has length 1) hugging the prompts.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : Tuple ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_lowercase : Optional[Any] =re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_lowercase : str =re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_lowercase : int =re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Union[str, Any] =direct_transformers_import(TRANSFORMERS_PATH)
def _SCREAMING_SNAKE_CASE(lowerCAmelCase__):
    """Split an identifier on CamelCase boundaries, keeping acronym runs (e.g. "TF") intact."""
    pattern = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    return [piece.group(0) for piece in re.finditer(pattern, lowerCAmelCase__)]
def _SCREAMING_SNAKE_CASE(text, width):
    """Center `text` in a field of `width` characters.

    Bug fix: both parameters shared one placeholder name (a SyntaxError);
    names recovered from the body. The check-mark emoji render two columns
    wide, so they count as length 2.
    """
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def _SCREAMING_SNAKE_CASE():
    """Build the markdown support table (model x slow/fast tokenizer, PT/TF/Flax).

    Bug fixes: many assignment targets were scrambled to one placeholder (the
    defaultdicts were built on an undefined name instead of `bool`, the first
    column width lost its `widths[0]` subscript, and the per-model flag
    assignment lost its `lookup_dict[attr_name]` target); all reconstructed
    from the later references.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def _SCREAMING_SNAKE_CASE(lowerCAmelCase__=False):
    """Verify the docs model table is current; rewrite it when `lowerCAmelCase__` (overwrite) is True.

    Bug fix: the original joined the boolean overwrite flag into the path;
    the docs directory "docs/source/en" (declared near the top of this file)
    is used instead.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join("docs/source/en", "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if lowerCAmelCase__:
            with open(os.path.join("docs/source/en", "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    # Bug fix: the parser/args objects were bound to a reused placeholder
    # name, leaving the later `parser` and `args` references undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 364
|
import argparse
import os
import re
_lowercase : List[str] ="""src/diffusers"""
# Pattern that looks at the indentation in a line.
_lowercase : str =re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : Dict =re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : Union[str, Any] =re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : Dict =re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : Tuple =re.compile(r"""\[([^\]]+)\]""")
def _SCREAMING_SNAKE_CASE(lowerCAmelCase__):
    """Return the leading whitespace of a line ("" when the line is blank).

    Bug fix: the body referenced an undefined `_re_indent`; the pattern
    declared at the top of this file (r"^(\s*)\S") is inlined here so the
    function is self-contained.
    """
    search = re.search(r"^(\s*)\S", lowerCAmelCase__)
    return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks delimited at `indent_level` (optionally between prompts).

    Bug fix: all four parameters shared one placeholder name (a SyntaxError);
    names recovered from how the body uses them (prompt scanning, indent
    comparison). Returns the list of block strings, covering all input lines.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def _SCREAMING_SNAKE_CASE(lowerCAmelCase__):
    """Wrap key function `lowerCAmelCase__` so comparisons ignore case and underscores.

    Bug fix: the inner function's parameter shadowed the key function, so the
    body referenced an undefined `key`.
    """
    def _inner(obj):
        return lowerCAmelCase__(obj).lower().replace("_", "")

    return _inner
def _SCREAMING_SNAKE_CASE(objects, key=None):
    """Sort import names: CONSTANTS first, then Classes, then functions.

    Bug fixes: both parameters shared one placeholder name (a SyntaxError),
    and the no-op key returned an undefined `x` instead of its argument.
    Within each group the sort ignores case and underscores.
    """
    # If no key is provided, we use a noop.
    def noop(obj):
        return obj

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    sort_key = ignore_underscore(key)
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)
def _SCREAMING_SNAKE_CASE(lowerCAmelCase__):
    """Sort the object names inside one (possibly multi-line) import statement.

    Bug fixes: the inner `_replace` referenced an undefined `match` (its
    parameter had a scrambled name), and several assignment targets were
    lost; reconstructed from the later references.
    """
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys)]) + "]"

    lines = lowerCAmelCase__.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE(file, check_only=True):
    """Sort the `_import_structure` blocks of one __init__.py.

    Returns True when changes are needed and `check_only` is set; otherwise
    rewrites the file in place. Bug fixes: both parameters shared one
    placeholder name (a SyntaxError; `file` and `check_only` recovered from
    the f-string and keyword call below), and the per-block write-back had
    lost its `main_blocks[block_idx]` target.
    """
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def _SCREAMING_SNAKE_CASE(check_only=True, path="src/diffusers"):
    """Run sort_imports over every __init__.py below `path`.

    Bug fixes: the original walked its boolean flag instead of a directory
    (default "src/diffusers", the source root declared near the top of this
    file, now a backward-compatible parameter) and rebound the failure list
    each time instead of appending. Raises ValueError listing how many files
    would change when `check_only` is True.
    """
    failures = []
    for root, _, files in os.walk(path):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # Bug fix: the parser/args objects were bound to a reused placeholder
    # name, leaving the later `parser` and `args` references undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 364
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_UpperCAmelCase : Optional[Any] = False
@skip_mps
class lowerCAmelCase ( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Fast tests for ``StableDiffusionAttendAndExcitePipeline`` on tiny random models.

    Bug fixes vs. the previous revision: the mixin base classes, the class-level
    pipeline configuration, and every local variable had been collapsed onto
    single (undefined) names, and ``get_dummy_inputs`` declared two parameters
    with the same name (a SyntaxError). Names are restored to the
    ``unittest``/diffusers tester conventions so the framework can discover them.
    """

    # Class-level configuration consumed by the Pipeline*TesterMixin machinery.
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Attend-and-excite is sensitive to non-determinism; force deterministic kernels.
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        """Build a tiny, fully-random set of pipeline components (seeded for reproducibility)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for the pipeline on ``device``."""
        if str(device).startswith('mps'):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs

    def test_inference(self):
        """One denoising step on CPU must reproduce the pinned output slice."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test for attend-and-excite against a pinned reference image.

    Bug fixes vs. the previous revision: ``torch.use_deterministic_algorithms``,
    ``safety_checker`` and the generator were passed undefined names, the dtype
    ``torch.floataa`` does not exist (float16 intended), and every local was
    collapsed onto one rebound name. NOTE(review): this class shadows the fast
    test class above (both obfuscated to the same name) — pre-existing in this file.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        # Release GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        # Indices of the prompt tokens ("elephant", "glasses") to attend on.
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type='numpy',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
| 188
|
from pathlib import Path
import fire
def SCREAMING_SNAKE_CASE ( src_dir , dest_dir , n ):
    """Copy each file in ``src_dir`` to ``dest_dir`` keeping only its first ``n`` lines.

    Bug fix: the previous revision declared three parameters with the same name
    (a SyntaxError in Python); they are restored as ``src_dir``/``dest_dir``/``n``.

    Args:
        src_dir: directory whose files are minified (not recursed into).
        dest_dir: output directory, created if missing.
        n: number of (right-stripped) lines to keep per file.
    """
    src_path = Path(src_dir)
    dest_dir_path = Path(dest_dir)
    dest_dir_path.mkdir(exist_ok=True)
    for path in src_path.iterdir():
        # Keep the first n lines, with trailing whitespace removed.
        new_lines = [line.rstrip() for line in path.read_text().splitlines()][:n]
        dest_path = dest_dir_path.joinpath(path.name)
        print(dest_path)
        # write_text closes the handle, unlike the old bare .open('w').write(...).
        dest_path.write_text('\n'.join(new_lines))
if __name__ == "__main__":
    # Bug fix: the function in this file is named `SCREAMING_SNAKE_CASE`
    # (originally `minify`); the previous call referenced an undefined name.
    fire.Fire(SCREAMING_SNAKE_CASE)
| 188
| 1
|
from math import factorial
# Factorials of the ten decimal digits, keyed by the digit as a string.
# (Annotation fixed: this is a dict, not a str.)
lowercase : dict = {str(d): factorial(d) for d in range(10)}
# Canonical alias: other code in this module refers to the table by this name.
DIGIT_FACTORIAL = lowercase
def lowerCAmelCase__ ( _a ):
    """Return the sum of the factorials of the decimal digits of ``_a``.

    Bug fix: the previous revision ignored its own parameter and indexed an
    undefined ``DIGIT_FACTORIAL``/``SCREAMING_SNAKE_CASE_`` pair; compute
    directly from ``factorial`` so the function is self-contained.
    """
    return sum(factorial(int(digit)) for digit in str(_a))
def lowerCAmelCase__ ( limit=None ):
    """Project Euler 34: sum of all numbers equal to the sum of their digit factorials.

    Bug fix: the previous revision called an undefined ``sum_of_digit_factorial``;
    the helper is now inlined. Generalized (backward-compatibly) with ``limit``.

    Args:
        limit: exclusive upper bound of the search. Defaults to ``7 * 9! + 1``,
            above which no candidate can exist.
    """
    if limit is None:
        limit = 7 * factorial(9) + 1

    def _digit_factorial_sum(num):
        # Local helper (the module-level helper names in this file are mangled).
        return sum(factorial(int(d)) for d in str(num))

    # 1 and 2 are excluded (trivial single-digit cases), hence range(3, ...).
    return sum(i for i in range(3, limit) if _digit_factorial_sum(i) == i)
if __name__ == "__main__":
    # Bug fix: the solver in this file is named `lowerCAmelCase__`
    # (originally `solution`); the previous revision printed an undefined name.
    print(F"""{lowerCAmelCase__() = }""")
| 568
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
# SentencePiece's word-initial marker character.
SPIECE_UNDERLINE = '▁'
# Bug fix: the previous revision rebound one obfuscated name five times, so the
# logger and every constant the tokenizer class reads were lost. Canonical names
# are restored; the final obfuscated binding is preserved as an alias.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
# Preserve the last obfuscated binding for any stale references.
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class _UpperCAmelCase ( PreTrainedTokenizer ):
    """BARTpho (syllable-level) tokenizer: SentencePiece plus a reduced fairseq vocabulary.

    Bug fixes vs. the previous revision: every method declared several parameters
    with the same name (a SyntaxError), the base class ``a_`` was undefined
    (``PreTrainedTokenizer`` intended — it is imported above), and all tokenizer-API
    method/attribute names had collapsed onto single colliding names. They are
    restored to the ``PreTrainedTokenizer`` contract so the framework can call them.
    """

    # Class-level configuration read by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, '''r''', encoding='''utf-8''') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add ``<s> A </s>`` (or ``<s> A </s></s> B </s>``) special tokens around the ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BARTpho does not use token types; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # Size of the reduced fairseq vocabulary, not of the full SP model.
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Unknown pieces map to <unk>'s id.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        # "▁" is SentencePiece's word-boundary marker; turn it back into spaces.
        out_string = ''''''.join(tokens).replace('▁', ''' ''').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model and the monolingual dict into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file to copy: dump the in-memory serialized SP model.
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, '''w''', encoding='''utf-8''') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(token)} \n''')
        return out_vocab_file, out_monolingual_vocab_file
| 434
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class snake_case ( datasets.BuilderConfig ):
    """BuilderConfig for the pandas-backed Arrow builder below.

    NOTE(review): the class name is obfuscated; downstream code presumably
    refers to it as ``PandasConfig`` — confirm before relying on the name.
    """
    # Optional explicit feature schema; when set, loaded tables are cast to it.
    snake_case_ : Optional[datasets.Features] = None
class snake_case ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that loads pickled pandas DataFrames.

    Bug fixes vs. the previous revision: locals in ``_split_generators`` were
    collapsed onto one name (``files`` was read but never bound) and the wrong
    argument was passed to ``dl_manager.iter_files``. Method/attribute names are
    restored to the ``datasets`` builder contract so the framework can call them.
    """

    # Config class the framework instantiates for this builder.
    # NOTE(review): the config dataclass above is obfuscated to `snake_case`, so
    # `PandasConfig` stays unresolved until its name is restored as well.
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata: only the (optional) user-supplied features."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict ``data_files``; one split per dict key."""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"""files""": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast to the user schema when one was provided."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(index, pa.Table)`` pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, """rb""") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 715
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> str:
    """Map a GroupViT checkpoint parameter name to its transformers equivalent.

    Bug fix: the previous revision stored every replacement in a throw-away
    local and returned the input unchanged; the replacements must be chained
    on ``name`` itself so they compose.

    Args:
        SCREAMING_SNAKE_CASE__: original parameter name from the GroupViT checkpoint.

    Returns:
        The renamed parameter for the HF ``GroupViTModel`` state dict.
    """
    name = SCREAMING_SNAKE_CASE__
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""")
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""")
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""", """layers""")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""", """self_attn""")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""", """out_proj""")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layer_norm1""")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""", """layer_norm2""")
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""", """vision_model.layernorm""")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""")
    if "ln_1" in name:
        name = name.replace("""ln_1""", """layer_norm1""")
    if "ln_2" in name:
        name = name.replace("""ln_2""", """layer_norm2""")
    if "c_fc" in name:
        name = name.replace("""c_fc""", """fc1""")
    if "c_proj" in name:
        name = name.replace("""c_proj""", """fc2""")
    if "text_encoder" in name:
        name = name.replace("""text_encoder""", """text_model""")
    if "ln_final" in name:
        name = name.replace("""ln_final""", """final_layer_norm""")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""", """visual_projection.""")
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""", """visual_projection.3.""")
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""", """text_projection""")
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""", """text_projection.3""")
    return name


# Canonical alias: sibling code in this file calls the function by this name.
rename_key = lowercase
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
    # Splits the fused qkv / in_proj tensors of a GroupViT checkpoint into
    # separate q/k/v projections and renames every remaining key.
    #
    # NOTE(review): this block is badly mangled by identifier obfuscation:
    #   * both parameters share one name — a SyntaxError in Python;
    #   * `orig_state_dict`, `key_split`, `val`, `dim`, `config` and `new_name`
    #     are read, yet every assignment targets the throw-away `_snake_case`,
    #     so the destination dict-key f-strings that originally received the
    #     split tensors are lost and cannot be reconstructed from this file.
    # TODO: restore from the upstream convert_groupvit_to_pytorch.py script.
    for key in orig_state_dict.copy().keys():
        _snake_case : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            _snake_case : Any = key.split(""".""" )
            _snake_case , _snake_case : List[Any] = int(key_split[2] ), int(key_split[4] )
            _snake_case : List[Any] = config.vision_config.hidden_size
            if "weight" in key:
                # Fused weight is stacked [q; k; v] along dim 0 — split into thirds.
                _snake_case : List[Any] = val[:dim, :]
                _snake_case : Union[str, Any] = val[dim : dim * 2, :]
                _snake_case : int = val[-dim:, :]
            else:
                _snake_case : Union[str, Any] = val[:dim]
                _snake_case : str = val[dim : dim * 2]
                _snake_case : Dict = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            _snake_case : int = key.split(""".""" )
            _snake_case : Optional[int] = int(key_split[3] )
            _snake_case : Union[str, Any] = config.text_config.hidden_size
            if "weight" in key:
                _snake_case : int = val[:dim, :]
                _snake_case : Tuple = val[
                    dim : dim * 2, :
                ]
                _snake_case : int = val[-dim:, :]
            else:
                _snake_case : Tuple = val[:dim]
                _snake_case : Tuple = val[dim : dim * 2]
                _snake_case : str = val[-dim:]
        else:
            _snake_case : Tuple = rename_key(SCREAMING_SNAKE_CASE__ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                _snake_case : Any = val.squeeze_()
            else:
                _snake_case : Optional[int] = val
    return orig_state_dict
def lowercase ( ):
    """Download the standard COCO cats test image used to verify model conversions.

    Bug fix: the previous revision passed undefined names to ``requests.get``;
    fetch the URL that was just defined, streaming the response body.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Canonical alias: sibling code in this file calls the function by this name.
prepare_img = lowercase
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int="groupvit-gcc-yfcc" , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Optional[Any]:
    # Converts a GroupViT checkpoint into a HF GroupViTModel, verifies it against
    # the pinned logits for the COCO cats image, then saves (and optionally pushes).
    #
    # NOTE(review): identifier obfuscation has broken this entry point:
    #   * all four parameters share one name — a SyntaxError in Python
    #     (originally checkpoint_path / pytorch_dump_folder_path / model_name /
    #     push_to_hub, judging by the argparse block below);
    #   * `model`, `missing_keys`, `unexpected_keys`, `processor`, `outputs` and
    #     `model_name` are read but every assignment targets `_snake_case`.
    # TODO: restore locals from the upstream conversion script before use.
    _snake_case : Any = GroupViTConfig()
    _snake_case : List[Any] = GroupViTModel(SCREAMING_SNAKE_CASE__ ).eval()
    _snake_case : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
    _snake_case : Tuple = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    _snake_case , _snake_case : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(SCREAMING_SNAKE_CASE__ ) == 0)
    # verify result
    _snake_case : List[Any] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
    _snake_case : List[Any] = prepare_img()
    _snake_case : int = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
    with torch.no_grad():
        _snake_case : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
    if model_name == "groupvit-gcc-yfcc":
        _snake_case : Union[str, Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        _snake_case : Union[str, Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
    processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )
    print("""Successfully saved processor and model to""" , SCREAMING_SNAKE_CASE__ )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
        model.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
if __name__ == "__main__":
    # Bug fix: the parser was bound to `a__` while `parser`/`args` were
    # referenced; bind the names consistently.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
    )
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
    parser.add_argument(
        """--model_name""",
        default="""groupvit-gccy-fcc""",
        type=str,
        help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
    )
    args = parser.parse_args()
    # NOTE(review): every helper in this file is obfuscated to `lowercase`; at
    # module level the final binding is the @torch.no_grad conversion entry
    # point, so calling `lowercase` dispatches to it (the old call named the
    # undefined `convert_groupvit_checkpoint`).
    lowercase(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 198
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level bindings below share one obfuscated name, so
# the logger created first is immediately overwritten by the archive map —
# confirm nothing in the full file relies on `UpperCamelCase` being the logger.
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
# Map of canonical RoFormer checkpoint names to their hosted config URLs.
UpperCamelCase : str = {
    """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
    """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
    """junnyu/roformer_chinese_char_small""": (
        """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
    ),
    """junnyu/roformer_chinese_char_base""": (
        """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
    ),
    """junnyu/roformer_small_discriminator""": (
        """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
    ),
    """junnyu/roformer_small_generator""": (
        """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A__ ( PretrainedConfig ):
    """Configuration class for RoFormer models (rotary position embeddings).

    Bug fixes vs. the previous revision: the class inherited from itself (a
    NameError at class-creation time) instead of the imported
    ``PretrainedConfig``, and every ``__init__`` parameter shared one name
    (a SyntaxError); parameter names are restored from the attribute
    assignments. ``model_type`` is the attribute the auto classes key on.
    """

    model_type = 'roformer'

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class A__ ( OnnxConfig ):
    """ONNX export configuration for RoFormer.

    Bug fixes vs. the previous revision: the base class was the (wrong) config
    class above instead of the imported ``OnnxConfig``, and the property name
    was mangled — the exporter looks it up as ``inputs``. NOTE(review): this
    class shadows the config class above (both obfuscated to ``A__``) —
    pre-existing in this file.
    """

    @property
    def inputs(self):
        """Dynamic-axis input spec expected by the ONNX exporter."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 37
|
from __future__ import annotations
_UpperCamelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_UpperCamelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _lowercase ( lowercase__ ):
__lowerCAmelCase : str = []
__lowerCAmelCase : Union[str, Any] = len(lowercase__ )
for i in range(lowercase__ ):
__lowerCAmelCase : float = -1
for j in range(i + 1 , lowercase__ ):
if arr[i] < arr[j]:
__lowerCAmelCase : str = arr[j]
break
result.append(lowercase__ )
return result
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Any = []
for i, outer in enumerate(lowercase__ ):
__lowerCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
__lowerCAmelCase : List[Any] = inner
break
result.append(lowercase__ )
return result
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Optional[int] = len(lowercase__ )
__lowerCAmelCase : list[float] = []
__lowerCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(lowercase__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__lowerCAmelCase : int = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    # NOTE(review): this demo block references `arr`, `setup`,
    # `next_greatest_element_slow`, `next_greatest_element_fast` and
    # `next_greatest_element`, none of which exist under those names here —
    # the three implementations above are all obfuscated to `_lowercase`
    # (only the last survives) and the fixtures/setup string are bound to
    # `_UpperCamelCase`. TODO: restore the canonical names file-wide.
    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    _UpperCamelCase = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 492
| 0
|
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _a (xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into transformers' on-disk format.

    Bug fixes vs. the previous revision: the two parameters shared one name (a
    SyntaxError) and every local was collapsed onto one rebound name, so the
    weights, config and vocab clobbered each other.

    Args:
        xlm_checkpoint_path: path of the official XLM ``.pth`` dump.
        pytorch_dump_folder_path: output directory for weights/config/vocab files.
    """
    chkpt = torch.load(xlm_checkpoint_path, map_location='''cpu''')
    state_dict = chkpt['''model''']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt['''params''']
    # Keep only JSON-serializable entries (drop tensors/arrays).
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['''dico_word2id''']
    # Restore BPE conventions: "@@" marks a word-internal piece, "</w>" a word end
    # (the first 14 ids are special tokens and are left untouched).
    vocab = {s + '''</w>''' if s.find('''@@''') == -1 and i > 13 else s.replace('''@@''', ''''''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(config, indent=2) + '''\n''')
    # Bug fix: the old message printed the config path for the vocab file.
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''')
    with open(pytorch_vocab_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(json.dumps(vocab, indent=2) + '''\n''')
if __name__ == "__main__":
    # Bug fix: the parser was bound to `__lowerCamelCase` while `parser`/`args`
    # were referenced; also the converter in this file is named `_a`
    # (originally `convert_xlm_checkpoint_to_pytorch`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    _a(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 702
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
# Generic element type stored in the cache. Bug fix: the TypeVar was bound only
# to an obfuscated name while the class below subscripts `Generic[T]`.
T = TypeVar('T')
# Preserve the previous obfuscated binding as an alias.
__lowerCamelCase = T
class UpperCAmelCase ( Generic[T]):
    """LRU cache over hashable keys, keeping at most ``_MAX_CAPACITY`` entries.

    Bug fixes vs. the previous revision: instance state was assigned to
    throw-away locals instead of ``self``, the capacity attribute was never
    declared (it was read via an undefined ``LRUCache`` name), and both public
    methods shared one mangled name. Method names are restored to the ones the
    ``__main__`` demo below calls.
    """

    dq_store: deque  # Cache store of keys, most recent first
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # 0/None means "unbounded" (matches the original semantics).
            UpperCAmelCase._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''')
        else:
            UpperCAmelCase._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Touch key ``x``: insert it (evicting the LRU entry when full) or move it to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == UpperCAmelCase._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'


# Canonical alias used by the __main__ demo below.
LRUCache = UpperCAmelCase
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the cache instance was bound to an obfuscated name while the
    # calls below use `lru_cache`; bind it consistently.
    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 271
| 0
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( __A , unittest.TestCase ):
    """Tokenizer test suite for ``ReformerTokenizer`` / ``ReformerTokenizerFast``.

    NOTE(review): the base class ``__A`` is not defined in this module; by the
    shape of the tests (``get_tokenizer``, ``tokenizers_list``,
    ``tokenizer_integration_test_util``) it presumably stands for
    ``TokenizerTesterMixin`` — confirm against the original file.
    """
    # NOTE(review): all five mixin knobs below are bound to the same name, so
    # only the last assignment survives — the original attribute names
    # (tokenizer_class, rust_tokenizer_class, ...) were lost in obfuscation.
    UpperCamelCase_ = ReformerTokenizer
    UpperCamelCase_ = ReformerTokenizerFast
    UpperCamelCase_ = True
    UpperCamelCase_ = False
    UpperCamelCase_ = True
    def A__ ( self : Dict ) -> Tuple:
        """Build a slow tokenizer from the fixture SentencePiece model and save
        it to ``tmpdirname`` so the mixin can reload it.

        NOTE(review): the result is bound to ``lowercase`` but the next line
        uses ``tokenizer`` — the local name was mangled; confirm.
        """
        super().setUp()
        lowercase : Optional[Any] =ReformerTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    def A__ ( self : Optional[int] ) -> Optional[Any]:
        """``<s>`` must convert to id 1, and id 1 back to ``<s>``."""
        lowercase : List[Any] ='''<s>'''
        lowercase : List[Any] =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> Any:
        """Vocab starts with ``<unk>``/``<s>``, ends with ``j``, size 1000."""
        lowercase : Any =list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(UpperCAmelCase ) , 1000 )
    def A__ ( self : Tuple ) -> str:
        """The slow tokenizer reports a vocab size of 1000."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def A__ ( self : List[str] ) -> Tuple:
        """Slow and fast tokenizers must agree on tokenize() and encode()."""
        if not self.test_rust_tokenizer:
            return
        lowercase : List[str] =self.get_tokenizer()
        lowercase : int =self.get_rust_tokenizer()
        lowercase : Any ='''I was born in 92000, and this is falsé.'''
        lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase )
        lowercase : List[Any] =rust_tokenizer.tokenize(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        lowercase : List[Any] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
        lowercase : Optional[int] =rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        lowercase : Optional[Any] =self.get_rust_tokenizer()
        lowercase : str =tokenizer.encode(UpperCAmelCase )
        lowercase : Any =rust_tokenizer.encode(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Tuple , UpperCAmelCase : int=15 ) -> Optional[int]:
        """Padding to ``max_length`` must raise for simple and pair inputs.

        NOTE(review): the expected exception type passed to ``assertRaises``
        was lost in obfuscation (``UpperCAmelCase``) — confirm it was
        ``ValueError`` in the original.
        """
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowercase : Tuple =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
                # Simple input
                lowercase : List[str] ='''This is a simple input'''
                lowercase : List[Any] =['''This is a simple input 1''', '''This is a simple input 2''']
                lowercase : Optional[int] =('''This is a simple input''', '''This is a pair''')
                lowercase : List[Any] =[
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
    def A__ ( self : str ) -> int:
        """Deliberate no-op: disables the corresponding mixin test."""
        pass
    def A__ ( self : List[Any] ) -> Optional[Any]:
        """Full tokenization round-trip against the fixture model: tokens,
        token->id conversion (unknown pieces map to id 0 / ``<unk>``), and
        id->token conversion."""
        lowercase : Any =ReformerTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
        lowercase : str =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
        lowercase : Any =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCAmelCase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        lowercase : Dict =tokenizer.convert_tokens_to_ids(UpperCAmelCase )
        self.assertListEqual(
            UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        lowercase : str =tokenizer.convert_ids_to_tokens(UpperCAmelCase )
        self.assertListEqual(
            UpperCAmelCase , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    @cached_property
    def A__ ( self : str ) -> Optional[int]:
        """Pretrained crime-and-punishment tokenizer, cached per test class."""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
    @slow
    def A__ ( self : str ) -> List[Any]:
        """Pinned encoding of 'Hello World!' for the pretrained tokenizer."""
        lowercase : Tuple ='''Hello World!'''
        lowercase : Any =[126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
    @slow
    def A__ ( self : Optional[Any] ) -> Tuple:
        """Pinned encoding of a long text with punctuation and OOV words."""
        lowercase : str =(
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        lowercase : Union[str, Any] =[
            108,
            265,
            24,
            111,
            4,
            258,
            156,
            35,
            28,
            275,
            3,
            259,
            297,
            260,
            84,
            4,
            35,
            110,
            44,
            8,
            259,
            91,
            268,
            21,
            11,
            209,
            274,
            109,
            266,
            277,
            117,
            86,
            93,
            315,
            258,
            278,
            258,
            277,
            258,
            0,
            258,
            288,
            258,
            319,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            287,
            258,
            315,
            258,
            289,
            258,
            278,
            99,
            269,
            266,
            262,
            8,
            259,
            241,
            4,
            217,
            230,
            268,
            266,
            55,
            168,
            106,
            75,
            193,
            266,
            223,
            27,
            49,
            26,
            282,
            25,
            264,
            299,
            19,
            26,
            0,
            258,
            277,
            117,
            86,
            93,
            176,
            183,
            270,
            11,
            262,
            42,
            61,
            265,
        ]
        self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
    @require_torch
    @slow
    def A__ ( self : Optional[Any] ) -> List[Any]:
        """Smoke test: tokenize with the pretrained tokenizer and run a forward
        pass through a freshly initialised ``ReformerModel``."""
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        lowercase : List[Any] =list(self.big_tokenizer.get_vocab().keys() )[:10]
        lowercase : Union[str, Any] =''' '''.join(UpperCAmelCase )
        lowercase : int =self.big_tokenizer.encode_plus(UpperCAmelCase , return_tensors='''pt''' )
        lowercase : List[str] =self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        lowercase : Optional[Any] =ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        lowercase : str =encoded_sequence['''input_ids'''].shape
        lowercase : Any =ReformerModel(UpperCAmelCase )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**UpperCAmelCase )
            model(**UpperCAmelCase )
    @slow
    def A__ ( self : int ) -> Dict:
        """Pinned integration encoding at a fixed model revision."""
        lowercase : Optional[Any] ={'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        lowercase : Optional[Any] =[
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase , sequences=UpperCAmelCase , )
| 94
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
snake_case_ : str = precision
snake_case_ : Any = ceil(precision / 1_4 )
snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : Optional[Any] = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowerCAmelCase : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 58
| 0
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | bool:
    """Merge two implicant strings differing in exactly one position.

    Returns the merged string with ``_`` at the differing position, or
    ``False`` when the strings differ in more than one position.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge pairwise-adjacent terms; return the prime implicants.

    A term that merges with nothing in a round is prime and is collected;
    merged terms feed the next round (de-duplicated).
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # The two terms merge: neither is prime on its own.
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                # Never merged this round: prime implicant.
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string.

    >>> decimal_to_binary(3, [5])
    ['101']
    """
    temp = []
    for minterm in minterms:
        minterm = int(minterm)  # tolerate float input from the CLI prompt
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when ``string1`` covers ``string2``, i.e. the two strings
    differ in exactly ``count`` positions (``count`` is the number of ``_``
    wildcards in ``string1``)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select a covering set of prime implicants from the coverage chart.

    First pass picks essential implicants (the sole cover of some minterm);
    a greedy pass then covers the rest. ``chart`` is consumed (zeroed) in place.
    """
    temp = []
    select = [0] * len(chart)
    # Essential prime implicants: a column covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    # Zero every column this essential implicant covers.
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover: repeatedly take the implicant covering the most minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: ``chart[i][j] == 1`` iff prime implicant ``i``
    covers minterm ``j``."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """Interactive driver: read variable count and minterms, print results."""
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 447
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE(PipelineTool):
    """Text-summarization tool wrapping a BART CNN/SamSum checkpoint behind
    the ``PipelineTool`` encode/forward/decode interface.

    NOTE(review): original class name was lost in obfuscation; kept as-is so
    any external references still resolve.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's max length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Decode generated ids back to a clean summary string."""
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
| 447
| 1
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place queens row by row with DFS, appending full boards to ``boards``.

    ``possible_board[r]`` is the column of the queen in row ``r``; the two
    collision lists hold the ``row - col`` / ``row + col`` values of already
    placed queens, so diagonal conflicts are O(1) checks.
    """
    # The next row to fill is simply how many queens are already placed.
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # First check that in the current board (possible_board) there is no
        # other queen in the same column (vertical collision). Then apply the
        # two diagonal formulas:
        #
        #   45º:  y - x = b   or   row - col = b
        #   135º: y + x = b   or   row + col = b
        #
        # and verify neither value is already taken
        # (diagonal_right_collisions, diagonal_left_collisions).
        # If any of these hold there is a collision, so skip this column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # No collision: recurse with this queen added.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Compute and print every solution of the ``n``-queens problem."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 527
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax every edge out of ``v`` for one search direction.

    Updates ``cst_fwd``, ``parent`` and ``queue`` in place, and returns the
    (possibly improved) best known distance at which the two frontiers meet.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search already settled `nxt`, the frontiers meet
        # here; keep the cheapest such crossing.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra shortest distance from source to destination.

    Returns 0 when source == destination, and -1 when no path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard stopping criterion: once the settled frontiers can no
        # longer improve on the best crossing, stop.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 527
| 1
|
import os
import sys
import unittest
A__: Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
# Repository-relative paths to the test modules the utilities are run against.
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class _a(unittest.TestCase):
    """Tests for the ``utils/get_test_info.py`` helpers, exercised against the
    BERT (single tester) and BLIP (multiple testers) test modules."""

    def test_get_test_to_tester_mapping(self):
        """Test-class -> tester-class name mapping."""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        """Model-class -> list of test classes covering it."""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        """Model-class -> list of tester classes providing its fixtures."""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 221
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=7 , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: int=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[Any]=99 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Union[str, Any]=4 , __lowerCamelCase: Any=37 , __lowerCamelCase: List[str]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=512 , __lowerCamelCase: Union[str, Any]=16 , __lowerCamelCase: List[str]=2 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Any=3 , __lowerCamelCase: Any=4 , __lowerCamelCase: str=None , ):
'''simple docstring'''
UpperCamelCase__: List[Any] = parent
UpperCamelCase__: Union[str, Any] = 13
UpperCamelCase__: int = 7
UpperCamelCase__: int = True
UpperCamelCase__: int = True
UpperCamelCase__: Union[str, Any] = True
UpperCamelCase__: str = True
UpperCamelCase__: Optional[Any] = 99
UpperCamelCase__: str = 384
UpperCamelCase__: Dict = 2
UpperCamelCase__: Optional[Any] = 4
UpperCamelCase__: Union[str, Any] = 37
UpperCamelCase__: str = "gelu"
UpperCamelCase__: Union[str, Any] = 0.1
UpperCamelCase__: Union[str, Any] = 0.1
UpperCamelCase__: List[Any] = 512
UpperCamelCase__: Dict = 16
UpperCamelCase__: Union[str, Any] = 2
UpperCamelCase__: Optional[Any] = 0.02
UpperCamelCase__: Optional[int] = 3
UpperCamelCase__: Optional[Any] = 4
UpperCamelCase__: int = 128
UpperCamelCase__: Union[str, Any] = 2
UpperCamelCase__: Optional[int] = 9
UpperCamelCase__: Any = 1
UpperCamelCase__: Optional[Any] = None
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__: Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase__: int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__: str = None
if self.use_token_type_ids:
UpperCamelCase__: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__: str = None
UpperCamelCase__: str = None
UpperCamelCase__: Union[str, Any] = None
if self.use_labels:
UpperCamelCase__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__: List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__: Tuple = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: str = TFConvBertModel(config=__lowerCamelCase )
UpperCamelCase__: Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase__: str = [input_ids, input_mask]
UpperCamelCase__: str = model(__lowerCamelCase )
UpperCamelCase__: Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = TFConvBertForMaskedLM(config=__lowerCamelCase )
UpperCamelCase__: Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: str ):
'''simple docstring'''
UpperCamelCase__: str = self.num_labels
UpperCamelCase__: Any = TFConvBertForSequenceClassification(config=__lowerCamelCase )
UpperCamelCase__: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.num_choices
UpperCamelCase__: Dict = TFConvBertForMultipleChoice(config=__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__: Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__: Tuple = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__: Dict = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCamelCase__: List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Tuple ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = self.num_labels
UpperCamelCase__: str = TFConvBertForTokenClassification(config=__lowerCamelCase )
UpperCamelCase__: Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__: Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Build TFConvBertForQuestionAnswering and check start/end logits are (batch, seq).

    Restores the real parameter names; the original signature had duplicate
    ``__lowerCamelCase`` parameters, which is a SyntaxError.
    """
    model = TFConvBertForQuestionAnswering(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) for the common model tests.

    Fixes the broken tuple unpacking: the original bound all seven values to
    the same mangled name and then referenced ``input_ids``/``token_type_ids``/
    ``input_mask`` unbound (NameError at runtime).
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class _a(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model and pipeline tests for TFConvBert.

    Restores the mixin base classes, class-attribute names expected by the
    mixins (``all_model_classes`` etc.), ``setUp`` attribute assignments, and
    ``test_*`` method names so unittest discovery actually runs these tests.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # Must be bound on self: test methods below read these attributes.
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Round-trip each model through a TF SavedModel and check outputs survive."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBert halves the number of attention heads internally.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attention tensors are returned with the expected shapes and ordering."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class _a(unittest.TestCase):
    """Slow integration test comparing TFConvBert hidden states against reference values."""

    @slow
    def test_inference_masked_lm(self):
        """Run the pretrained base model on a tiny input and compare a 3x3 logits slice.

        Restores the local names (the original assigned to mangled names and then
        read ``model``/``__lowerCamelCase``, which were unbound) and gives the
        method a ``test_`` prefix so unittest discovers it.
        """
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 221
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 inpainting pipeline with tiny dummy models.

    Restores the mixin base class, the class-attribute names the mixin reads
    (``pipeline_class``, ``params``, ...), property names referenced throughout
    the body, and ``test_*`` method names for unittest discovery.
    """

    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet configured for 9-channel (image + mask + masked-image) inpainting input."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the unet/scheduler/movq dict expected by the pipeline constructor."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs: embeds, a 64x64 init image and a square mask."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:16, :16] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        """Run the tiny pipeline on CPU and compare a 3x3 corner slice to reference values."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Slow end-to-end Kandinsky 2.2 inpainting test on GPU against a reference image."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        """Inpaint a hat onto the reference cat image and compare mean pixel difference.

        Restores local names (the original bound everything to ``_a`` and then
        read ``pipe_prior``/``pipeline`` unbound) and the ``test_`` prefix.
        """
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): the obfuscated source lost the mask subscript; upstream zeroes
        # the top-center band so the hat region gets inpainted.
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 14
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a : Optional[Any] = logging.get_logger(__name__)
__a : Optional[Any] = torch.device("""cpu""")
def prepare_img():
    """Download the standard COCO test image (two cats) used for conversion sanity checks.

    Renamed from the mangled ``UpperCAmelCase`` to match the call site in
    ``convert_swiftformer_checkpoint``; ``stream=True`` restores the unbound
    ``lowercase`` argument.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the reference first-five logits for a released SwiftFormer checkpoint.

    Args:
        swiftformer_name: one of ``swiftformer_xs``/``_s``/``_l1``/``_l3``.

    Raises:
        ValueError: for an unknown checkpoint name (the original silently
            returned ``None``, which would crash later in ``torch.allclose``).
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    if swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    if swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    if swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
    raise ValueError(f"Unknown SwiftFormer checkpoint name: {swiftformer_name!r}")
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    The original signature declared three parameters all named ``lowercase``
    (a SyntaxError); names are restored from the call sites.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    """Build (old_key, new_key) pairs mapping original SwiftFormer weight names
    to the HuggingFace naming scheme.

    Fixes two broken names from the obfuscated source: the parameter was named
    ``lowercase`` while the body read ``state_dict``, and the accumulator was
    assigned to ``__lowercase`` while ``rename_keys.append`` was called on it.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # network.<stage>.<block>... -> encoder.network.<stage>.blocks.<block>...
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Copy weights from an original SwiftFormer checkpoint into a HF model and save it.

    Restores the function/parameter names (the original declared three
    parameters all named ``lowercase`` — a SyntaxError — and bound every local
    to ``__lowercase`` while reading the real names), verifies the converted
    logits against reference values, then writes the model to disk.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the original assigned the parser and parsed args to the
    # mangled name ``__a`` and then read ``parser``/``args`` unbound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 534
| 0
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _a(ProcessorMixin):
    r"""
    Processor for SAM: wraps a ``SamImageProcessor`` and handles point/label/box
    prompts (validation, rescaling to the resized image frame, tensor conversion).

    Restores the ``ProcessorMixin`` base (the source referenced an undefined
    ``lowercase_``) and the ``self.*`` attribute bindings in ``__init__`` that
    the other methods read.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        # These must be instance attributes: _pad_points_and_labels reads
        # self.point_pad_value and __call__/_normalize_and_convert read self.target_size.
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        """Preprocess images and rescale/convert prompt points, labels and boxes."""
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        """Rescale prompts to the resized frame and convert them to pt/tf tensors."""
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # Single original size shared by all prompt sets.
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad ragged point sets to a common length using self.point_pad_value."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """Scale (x, y) coordinates from the original image frame to the resized frame."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            # (x1, y1, x2, y2) boxes are treated as two corner points.
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        """Validate prompt nesting and coerce nested lists into numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| 120
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Mapping from ErnieM checkpoint identifiers on the Hub to their config file URLs.
UpperCamelCase = {
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class _a(PretrainedConfig):
    r"""
    Configuration class for ErnieM models.

    Restores the ``PretrainedConfig`` base (the source referenced an undefined
    ``lowercase_``), the ``model_type``/``attribute_map`` class-attribute names
    that ``PretrainedConfig`` machinery reads, and the ``__init__`` parameter
    names — the original declared every parameter as ``UpperCAmelCase_``,
    which is a SyntaxError (duplicate argument names).
    """

    model_type = "ernie_m"
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        initializer_range=0.02,
        pad_token_id=1,
        layer_norm_eps=1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        """Store ErnieM hyperparameters; defaults match the base checkpoint."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 1
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """Unit tests for ``RagRetriever`` across the canonical HF, custom HF and
    legacy FAISS index back-ends.

    NOTE(review): the obfuscated assignments below (``lowerCamelCase__: ... =``)
    bind throwaway locals, while later lines read names such as
    ``self.tmpdirname``, ``self.retrieval_vector_size``, ``dataset``,
    ``retriever``, ``config`` and ``UpperCAmelCase_`` that are never bound
    under those names in this chunk — confirm against the upstream
    ``test_retrieval_rag.py`` before relying on behaviour.
    """

    def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
        '''Create a scratch dir with tiny DPR (WordPiece) and BART (BPE) tokenizer fixtures.'''
        lowerCamelCase__: Any =tempfile.mkdtemp()
        # retrieval vector size used by every dummy dataset below
        lowerCamelCase__: Any =8
        # DPR tok
        lowerCamelCase__: Tuple =[
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        lowerCamelCase__: int =os.path.join(self.tmpdirname , "dpr_tokenizer")
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
        lowerCamelCase__: Any =os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        lowerCamelCase__: str =[
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
        lowerCamelCase__: Tuple =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowerCamelCase__: Any ={"unk_token": "<unk>"}
        lowerCamelCase__: str =os.path.join(self.tmpdirname , "bart_tokenizer")
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
        lowerCamelCase__: Dict =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"])
        lowerCamelCase__: List[str] =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as fp:
            fp.write(json.dumps(UpperCAmelCase_) + "\n")
        with open(self.merges_file , "w" , encoding="utf-8") as fp:
            fp.write("\n".join(UpperCAmelCase_))

    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->DPRQuestionEncoderTokenizer:
        '''Load the DPR question-encoder tokenizer fixture from the scratch dir.'''
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->DPRContextEncoderTokenizer:
        '''Load the DPR context-encoder tokenizer fixture from the scratch dir.'''
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))

    def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->BartTokenizer:
        '''Load the BART tokenizer fixture from the scratch dir.'''
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer"))

    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
        '''tearDown counterpart of the fixture setup: drop the scratch dir.'''
        shutil.rmtree(self.tmpdirname)

    def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple:
        '''Two-passage in-memory Dataset with a flat FAISS inner-product index.'''
        lowerCamelCase__: Union[str, Any] =Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
        '''RagRetriever backed by the canonical HF index (dataset loading patched).'''
        lowerCamelCase__: Union[str, Any] =self.get_dummy_dataset()
        lowerCamelCase__: int =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            lowerCamelCase__: int =dataset
            lowerCamelCase__: str =RagRetriever(
                UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : bool) ->Union[str, Any]:
        '''RagRetriever over a CustomHFIndex, optionally serialised to disk first.'''
        lowerCamelCase__: int =self.get_dummy_dataset()
        lowerCamelCase__: int =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
        if from_disk:
            # Persist dataset and faiss index, then let the retriever reload
            # them from the paths recorded in the config.
            lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , "dataset")
            lowerCamelCase__: Tuple =os.path.join(self.tmpdirname , "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname , "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset"))
            del dataset
            lowerCamelCase__: Dict =RagRetriever(
                UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            lowerCamelCase__: Tuple =RagRetriever(
                UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_) , )
        return retriever

    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
        '''RagRetriever over the legacy on-disk format (pickled meta + faiss file).'''
        lowerCamelCase__: List[Any] =Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT)
        lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr")
        pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb"))
        lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl")
        lowerCamelCase__: str ={sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , "wb"))
        lowerCamelCase__: Dict =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
        lowerCamelCase__: Any =RagRetriever(
            UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer())
        return retriever

    def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
        '''retrieve() on the canonical HF index ranks docs by inner product.'''
        lowerCamelCase__: Optional[Any] =1
        lowerCamelCase__: List[Any] =self.get_dummy_canonical_hf_index_retriever()
        lowerCamelCase__: Union[str, Any] =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]["id"][0] , "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])

    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
        '''Canonical retriever survives a save_pretrained / from_pretrained round-trip.'''
        lowerCamelCase__: str =self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                lowerCamelCase__: List[Any] =self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase_)
                lowerCamelCase__: List[str] =RagRetriever.from_pretrained(UpperCAmelCase_)
                self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
                lowerCamelCase__: Optional[Any] =np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
                lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
                self.assertTrue(out is not None)

    def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
        '''retrieve() on an in-memory CustomHFIndex.'''
        lowerCamelCase__: Optional[int] =1
        lowerCamelCase__: Optional[Any] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        lowerCamelCase__: Any =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]["id"][0] , "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])

    def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]:
        '''In-memory CustomHFIndex retriever round-trips through save/from_pretrained.'''
        lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_)
            lowerCamelCase__: List[Any] =RagRetriever.from_pretrained(UpperCAmelCase_)
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
            lowerCamelCase__: Tuple =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
            lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
            self.assertTrue(out is not None)

    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
        '''retrieve() on a CustomHFIndex loaded from disk.'''
        lowerCamelCase__: Tuple =1
        lowerCamelCase__: Dict =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        lowerCamelCase__: str =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]["id"][0] , "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])

    def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[int]:
        '''On-disk CustomHFIndex retriever round-trips through save/from_pretrained.'''
        lowerCamelCase__: List[str] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_)
            lowerCamelCase__: Optional[Any] =RagRetriever.from_pretrained(UpperCAmelCase_)
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
            lowerCamelCase__: List[Any] =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
            lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
            self.assertTrue(out is not None)

    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
        '''retrieve() on the legacy index; doc dicts only expose text/title.'''
        lowerCamelCase__: Any =1
        lowerCamelCase__: Optional[int] =self.get_dummy_legacy_index_retriever()
        lowerCamelCase__: Any =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_) , 2)
        self.assertEqual(sorted(doc_dicts[0]) , ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]) , UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]["text"][0] , "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0] , "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]])

    def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
        '''Legacy-index retriever round-trips through save/from_pretrained.'''
        lowerCamelCase__: str =self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_)
            lowerCamelCase__: Optional[int] =RagRetriever.from_pretrained(UpperCAmelCase_)
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
            lowerCamelCase__: int =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
            lowerCamelCase__: Tuple =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]:
        '''__call__ returns numpy arrays by default and torch tensors with return_tensors="pt".'''
        import torch

        lowerCamelCase__: Union[str, Any] =1
        lowerCamelCase__: Optional[Any] =self.get_dummy_canonical_hf_index_retriever()
        lowerCamelCase__: str =[[5, 7], [10, 11]]
        lowerCamelCase__: List[str] =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        lowerCamelCase__: Optional[int] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =(
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
        self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
        lowerCamelCase__: Optional[Any] =retriever(
            UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors="pt" , )
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =(  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
        '''With a ctx-encoder tokenizer set, __call__ additionally emits tokenized docs.'''
        lowerCamelCase__: Any =self.get_dpr_ctx_encoder_tokenizer()
        lowerCamelCase__: Dict =1
        lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_)
        lowerCamelCase__: List[Any] =[[5, 7], [10, 11]]
        lowerCamelCase__: Any =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
        lowerCamelCase__: List[Any] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_)
        self.assertEqual(
            len(UpperCAmelCase_) , 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")) , UpperCAmelCase_)  # check for doc token related keys in dictionary.
| 59
|
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output directories the conversion helpers below write into.
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def a(hor) -> None:
    """Convert a Diffuser temporal-UNet checkpoint for hopper-medium-v2.

    Loads the raw torch checkpoint for the given planning horizon, remaps its
    weights onto a ``diffusers`` ``UNet1DModel``, and saves the converted
    weights plus config under ``hub/hopper-medium-v2/unet/hor{hor}``.

    Args:
        hor: planning horizon; only 32 and 128 are supported.

    Raises:
        ValueError: if ``hor`` is neither 32 nor 128.
    """
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    else:
        # fix: the original fell through with the block-type variables unbound.
        raise ValueError(f"unsupported horizon {hor}; expected 32 or 128")
    # fix: the original bound every value to a throwaway name and then read
    # ``hor``/``model``/``state_dict``/... which were never defined.
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 6_5536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNetaDModel(**config)
    print(f"""length of state dict: {len(state_dict.keys() )}""")
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""")
    # Remap old checkpoint keys onto the diffusers parameter names (relies on
    # both state dicts enumerating parameters in the same order).
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""")
    with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""", 'w') as f:
        json.dump(config, f)
def a() -> None:
    """Convert the Diffuser value-function checkpoint for hopper-medium-v2.

    Loads the raw state dict, remaps its keys onto a ``diffusers``
    ``UNet1DModel`` configured as a value function, and saves the converted
    weights plus config under ``hub/hopper-medium-v2/value_function``.
    """
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 6_5536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    # fix: the original bound every value to a throwaway name and then read
    # ``state_dict``/``hf_value_function``/``mapping`` which were never defined.
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model  # the checkpoint already is a plain state dict
    hf_value_function = UNetaDModel(**config)
    print(f"""length of state dict: {len(state_dict.keys() )}""")
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""")
    # Remap old keys onto the diffusers parameter names (order-based mapping).
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
    # NOTE(review): ``unet`` and ``value_function`` are not defined in this
    # file — both conversion helpers above are bound to the name ``a`` (the
    # second shadows the first) — so this entry point raises NameError as
    # written; confirm the intended function names upstream.
    unet(3_2)
    # unet(128)
    value_function()
| 201
| 0
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
_lowercase : Optional[Any] = precision
_lowercase : Dict = ceil(precision / 14 )
_lowercase : int = 426_880 * Decimal(10_005 ).sqrt()
_lowercase : Optional[Any] = 1
_lowercase : Union[str, Any] = 13_591_409
_lowercase : Optional[int] = Decimal(__UpperCAmelCase )
for k in range(1 ,__UpperCAmelCase ):
_lowercase : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCAmelCase ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE = 50
    # fix: the original formatted with the undefined names ``n`` and ``pi``;
    # the Chudnovsky helper above is bound to ``__lowerCAmelCase``.
    print(f"""The first {SCREAMING_SNAKE_CASE} digits of pi is: {__lowerCAmelCase(SCREAMING_SNAKE_CASE)}""")
| 283
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
return "".join(sorted(__UpperCAmelCase ) )
def __lowerCAmelCase( __UpperCAmelCase ):
    """Return every word sharing this word's anagram signature.

    NOTE(review): relies on module-level helpers ``signature`` and
    ``word_by_signature``; ``signature`` is not bound under that name in this
    file (the helper above is also called ``__lowerCAmelCase``) — confirm.
    """
    lookup_key = signature(__UpperCAmelCase)
    return word_by_signature[lookup_key]
# Load the bundled word list, normalise it, and group words by their
# sorted-letter signature.
# NOTE(review): the names read below (``data``, ``word_list``, ``signature``,
# ``word_by_signature``) do not match the ``SCREAMING_SNAKE_CASE`` bindings
# above — as written this module raises NameError at import time; confirm the
# intended names against the upstream anagram script.
SCREAMING_SNAKE_CASE = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
SCREAMING_SNAKE_CASE = sorted({word.strip().lower() for word in data.splitlines()})
SCREAMING_SNAKE_CASE = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # Write every multi-member anagram group to anagrams.txt.
    # NOTE(review): ``anagram``, ``word_list`` and ``all_anagrams`` are not
    # bound under these names above (the assignments target
    # ``SCREAMING_SNAKE_CASE``), so this block raises NameError as written.
    SCREAMING_SNAKE_CASE = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
| 283
| 1
|
'''simple docstring'''
def lowerCamelCase (rows, cols, mat):
    """Side length of the largest all-ones square in ``mat`` (plain recursion).

    fix: the original declared all three parameters as ``_SCREAMING_SNAKE_CASE``
    (duplicate names — a SyntaxError) and referenced the unbound name
    ``UpperCamelCase__``; real names restored, logic unchanged.
    """

    def update_area_of_max_square(row, col):
        # BASE CASE: cells outside the grid contribute nothing.
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            # Square ending here extends the smallest of the three neighbours.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # Single-element list so the nested function can mutate the running max.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def lowerCamelCase (rows, cols, mat):
    """Side length of the largest all-ones square in ``mat`` (memoised recursion).

    fix: the original declared duplicate ``_SCREAMING_SNAKE_CASE`` parameters
    (a SyntaxError) and referenced the unbound name ``UpperCamelCase__``;
    real names restored, logic unchanged.
    """

    def update_area_of_max_square_using_dp_array(row, col, dp_array):
        if row >= rows or col >= cols:
            return 0
        # Return the cached answer when this cell was already solved.
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def lowerCamelCase (rows, cols, mat):
    """Side length of the largest all-ones square in ``mat`` (bottom-up DP table).

    fix: the original declared duplicate ``_SCREAMING_SNAKE_CASE`` parameters
    (a SyntaxError) and referenced the unbound name ``UpperCamelCase__``;
    real names restored, logic unchanged.
    """
    # One extra row/column of zeros avoids boundary checks.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def lowerCamelCase (rows, cols, mat):
    """Side length of the largest all-ones square (bottom-up DP, O(cols) space).

    fix: the original declared duplicate ``_SCREAMING_SNAKE_CASE`` parameters
    (a SyntaxError) and referenced the unbound name ``UpperCamelCase__``.
    Additionally, rotating rows via ``next_row = current_row`` aliased the two
    buffers, so the "diagonal" read saw the freshly written value of the
    current row instead of the previous row (e.g. [[1,1],[1,0]] returned 2
    instead of 1); the rotation now copies.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias) so the next pass still reads this row's final values.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # fix: the original printed via ``largest_square_area_in_matrix_bottom_up``,
    # a name never defined here; every helper above is bound to
    # ``lowerCamelCase`` (the last definition — the space-optimised bottom-up
    # variant — wins).
    print(lowerCamelCase(2, 2, [[1, 1], [1, 1]]))
|
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( UpperCamelCase__: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Scrape worldometers and return {counter label: counter value} strings.

    Args:
        UpperCamelCase__: page to scrape (defaults to the live coronavirus page).

    Returns:
        dict mapping each headline/panel label to its displayed count.
    """
    # fix: the original collapsed both ``keys`` and ``values`` onto the same
    # throwaway name and then did ``keys += ...`` on an unbound name; restore
    # the two accumulators.
    soup = BeautifulSoup(requests.get(UpperCamelCase__ ).text , """html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
    values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    # fix: the original iterated ``world_covidaa_stats()``, a name never
    # defined; the scraper above is bound to ``_lowerCAmelCase``.
    for key, value in _lowerCAmelCase().items():
        print(f'''{key}\n{value}\n''')
| 641
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = SpeechTaTokenizer
snake_case_ = False
snake_case_ = True
def __lowercase ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : str = SpeechTaTokenizer(A )
UpperCAmelCase__ : Union[str, Any] = AddedToken("""<mask>""" ,lstrip=A ,rstrip=A )
UpperCAmelCase__ : List[Any] = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : List[str] ,A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = """this is a test"""
UpperCAmelCase__ : str = """this is a test"""
return input_text, output_text
def __lowercase ( self : Optional[Any] ,A : Optional[int] ,A : Optional[Any]=False ,A : Union[str, Any]=20 ,A : Any=5 ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_input_output_texts(A )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(A ,add_special_tokens=A )
UpperCAmelCase__ : int = tokenizer.decode(A ,clean_up_tokenization_spaces=A )
return text, ids
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = """<pad>"""
UpperCAmelCase__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-4] ,"""œ""" )
self.assertEqual(vocab_keys[-2] ,"""<mask>""" )
self.assertEqual(vocab_keys[-1] ,"""<ctc_blank>""" )
self.assertEqual(len(A ) ,81 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase__ : Optional[int] = tokenizer.vocab_size
UpperCAmelCase__ : Dict = len(A )
self.assertNotEqual(A ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase__ : Union[str, Any] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
UpperCAmelCase__ : str = tokenizer.add_tokens(A )
UpperCAmelCase__ : Dict = tokenizer.vocab_size
UpperCAmelCase__ : Optional[int] = len(A )
self.assertNotEqual(A ,0 )
self.assertEqual(A ,A )
self.assertEqual(A ,len(A ) )
self.assertEqual(A ,all_size + len(A ) )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" ,add_special_tokens=A )
self.assertGreaterEqual(len(A ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
UpperCAmelCase__ : Tuple = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
UpperCAmelCase__ : Dict = tokenizer.add_special_tokens(A )
UpperCAmelCase__ : List[Any] = tokenizer.vocab_size
UpperCAmelCase__ : Optional[Any] = len(A )
self.assertNotEqual(A ,0 )
self.assertEqual(A ,A )
self.assertEqual(A ,len(A ) )
self.assertEqual(A ,all_size_a + len(A ) )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" ,add_special_tokens=A )
self.assertGreaterEqual(len(A ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_tokenizer()
UpperCAmelCase__ : Dict = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(A ,[SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A ,[SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(A )
# fmt: off
self.assertListEqual(A ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
UpperCAmelCase__ : str = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A ,[SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
UpperCAmelCase__ : Tuple = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="""microsoft/speecht5_asr""" ,revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" ,sequences=A ,)
| 717
|
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# Both module constants were previously bound to the same placeholder name, so
# the logger was immediately clobbered by the TypeVar; give each its own name.
logger = logging.get_logger(__name__)

# Generic alias: "either a map-style Dataset or a streaming IterableDataset".
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets,
    probabilities=None,
    seed=None,
    info=None,
    split=None,
    stopping_strategy="first_exhausted",
):
    """Interleave several datasets (all map-style or all iterable) into one.

    Args:
        datasets: non-empty list of `Dataset` or `IterableDataset` objects
            (dataset dictionaries are rejected with a helpful message).
        probabilities: optional per-dataset sampling probabilities.
        seed: RNG seed used when sampling with `probabilities`.
        info / split: metadata forwarded to the resulting dataset.
        stopping_strategy: "first_exhausted" stops when any source runs out;
            "all_exhausted" oversamples until every source is exhausted.

    Raises:
        ValueError: on an empty list, mixed or unsupported element types, or an
            invalid `stopping_strategy`.

    Fixes: the original declared every parameter under one repeated placeholder
    name (a SyntaxError) and never bound `dataset_type` / `other_type`.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes the expected type; the other type is only
            # kept around for the mixed-type error message below.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets,
    info=None,
    split=None,
    axis=0,
):
    """Concatenate several datasets (all map-style or all iterable) into one.

    Args:
        dsets: non-empty list of `Dataset` or `IterableDataset` objects.
        info / split: metadata forwarded to the resulting dataset.
        axis: 0 to concatenate rows, 1 to concatenate columns.

    Raises:
        ValueError: on an empty list or mixed/unsupported element types.

    Fixes: the original declared every parameter under one repeated placeholder
    name (a SyntaxError) and never bound `dataset_type` / `other_type`.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes the expected type; the other type is only
            # kept around for the mixed-type error message below.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 194
| 0
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
# The functions below log via `logger`; the original bound this to a throwaway
# name that was immediately clobbered by the layer-map dicts further down.
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the teacher layers at indices *layers_to_copy* into *dest_layers*, in order.

    Fixes: the original declared all three parameters under one repeated name
    (a SyntaxError), asserted on the undefined placeholder ``_lowercase``, and
    was defined under a name its call sites below never used.
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
# Maps num layers in teacher -> num layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
# (Previously both dicts were bound to the same throwaway name, leaving the
# `LAYERS_TO_COPY` / `LAYERS_TO_SUPERVISE` lookups below unresolved.)
LAYERS_TO_COPY = {
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
# Maps num layers in student -> which teacher layers to supervise during distillation.
LAYERS_TO_SUPERVISE = {
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """Return the teacher layer indices to copy for a student of depth *n_student*.

    Falls back to the first *n_student* layers (with a warning) when no
    hardcoded mapping exists for this (teacher, student) depth pair.

    Fixes: the original declared both parameters under one repeated name
    (a SyntaxError), referenced the undefined placeholder ``_lowercase``, and
    was defined under a name its call sites below never used.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Return the teacher layer indices used to supervise each student layer.

    Raises:
        ValueError: if the student is deeper than the teacher.

    Fixes: the original declared both parameters under one repeated name
    (a SyntaxError) and referenced the undefined placeholder ``_lowercase``.
    """
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        # A single-layer student is supervised by the teacher's last layer.
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher,
    save_path="student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Build a smaller "student" seq2seq model by copying layers of *teacher*.

    Args:
        teacher: model instance, or a model identifier/path to load.
        save_path: directory where the student (and tokenizer) are saved.
        e / d: encoder / decoder depth of the student; at least one must be
            given, the other defaults to the teacher's depth.
        copy_first_teacher_layers: copy the first N layers instead of the
            alternating selection from `LAYERS_TO_COPY`.
        e_layers_to_copy / d_layers_to_copy: explicit teacher layer indices.
        **extra_config_kwargs: extra overrides for the student config.

    Returns:
        (student_model, encoder_layer_indices_copied, decoder_layer_indices_copied)

    Fixes: the original collapsed every parameter onto one repeated name
    (a SyntaxError), referenced the undefined placeholder ``_lowercase``
    throughout, built the init metadata dict without attaching it to the
    student config, and was not defined under the name `fire.Fire` invokes.
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5-style configs name their depth attributes differently
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # CLI entry point: exposes the student-creation routine via python-fire,
    # e.g. `python make_student.py <teacher> <save_path> --e 6 --d 3`.
    # NOTE(review): `create_student_by_copying_alternating_layers` must be the
    # name of the builder function defined above -- confirm it is not left
    # under an obfuscated/placeholder name.
    fire.Fire(create_student_by_copying_alternating_layers)
| 145
|
"""simple docstring"""
def solution():
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Rewritten to use the standard library's proleptic Gregorian calendar
    instead of hand-rolled leap-year/day-count arithmetic, and renamed so the
    ``__main__`` guard's ``solution()`` call resolves.

    >>> solution()
    171
    """
    import datetime

    # date.weekday(): Monday == 0 ... Sunday == 6
    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6
    )
if __name__ == "__main__":
    # Print the Project Euler answer when run as a script.
    # NOTE(review): this calls `solution`, so the function above must carry
    # that name -- confirm it is not left under a placeholder name.
    print(solution())
| 595
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder with a flat ``{"content": string}`` schema.

    Fixes: the class carried a placeholder name (the tests below reference
    ``DummyBeamDataset``), its three builder hooks were collapsed onto one
    method name, and method bodies referenced the undefined ``__snake_case``.
    """

    def _info(self):
        # No supervised (input, target) pairs for this toy dataset.
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder with a nested ``{"a": {"b": [string]}}`` schema.

    Fixes: the class carried a placeholder name (the tests below reference
    ``NestedBeamDataset``), its three builder hooks were collapsed onto one
    method name, and method bodies referenced the undefined ``__snake_case``.
    """

    def _info(self):
        # No supervised (input, target) pairs for this toy dataset.
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (key, example) pairs with a flat string ``content`` field.

    Fixes: renamed from a placeholder shared with the nested variant below
    (the second definition shadowed this one, and callers use this name).
    """
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
def get_test_nested_examples():
    """Return (key, example) pairs with a nested ``{"a": {"b": [str]}}`` field.

    Fixes: renamed from a placeholder shared with the flat variant above
    (callers in the test class use this name).
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class lowercase(TestCase):
    """End-to-end tests for the Beam-based builders, run with the DirectRunner.

    Fixes: the base class was an undefined placeholder (restored to the
    imported ``TestCase``), all four tests were collapsed onto one method name
    (so only the last survived and none matched unittest's ``test_`` prefix),
    bodies referenced the undefined ``__snake_case``, and the sharded test
    checked shard 00000 twice instead of 00000 and 00001.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', F"""{builder.name}-train.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # Keep a handle on the real writer so the patched one can delegate to
        # it with a forced shard count.
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', F"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', F"""{builder.name}-train-00001-of-00002.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, preparation must fail fast.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', F"""{builder.name}-train.arrow""")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
| 705
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowercase(ProcessorMixin):
    """Wav2Vec2 processor: routes audio to a feature extractor and text to a
    tokenizer behind one interface, with a deprecated target-context mode.

    Fixes: the base class was an undefined placeholder (restored to the
    imported ``ProcessorMixin``), both required class attributes were bound to
    the same placeholder name, every method was collapsed onto one name with
    duplicated parameter names (a SyntaxError), and an undefined name was
    passed as the warning category in ``from_pretrained``.
    """

    # ProcessorMixin resolves these attribute names to the wrapped classes.
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Calls go to the feature extractor unless we are inside the
        # (deprecated) `as_target_processor` context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load via ProcessorMixin; fall back to loading the feature extractor
        and a Wav2Vec2 CTC tokenizer separately for legacy configs."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ',
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and/or `text` to the
        tokenizer; when both are given, attach token ids as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            # First positional argument is the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and/or `labels`
        with the tokenizer; when both are given, attach padded label ids."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            # First positional argument is the features to pad.
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Delegate batch decoding of token ids to the tokenizer."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate decoding of token ids to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route plain calls to the tokenizer."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 471
| 0
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy weights from an old-structure (XLM)ProphetNet checkpoint into the new layout.

    Loads the checkpoint with both the old and the new model classes, walks every
    parameter the new model failed to load (``missing_keys``), copies it over from
    the old model, and finally saves the converted model.

    Args:
        prophetnet_checkpoint_path: path of the official PyTorch dump.
        pytorch_dump_folder_path: output folder for the converted model.

    Raises:
        ValueError: if a missing key cannot be resolved/initialized.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections are stored fused as in_proj_* in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: stay on the same old module).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the old model lacks the mapped one.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Split the fused QKV projection into the three separate projections.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These were no-op bare comparisons in the original; made real assertions.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both module trees.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Restored: the parser and parsed args were assigned to mangled names,
    # leaving `parser` and `args` undefined below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 409
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class a ( PretrainedConfig ):
    """Configuration class for an EfficientFormer-style vision model.

    Stores the hyper-parameters used to instantiate the model; extra keyword
    arguments are forwarded to ``PretrainedConfig``.
    (Restored: the original signature repeated one parameter name 27 times —
    a SyntaxError — and never assigned the values onto ``self``; base class
    placeholder replaced with the ``PretrainedConfig`` imported above.)
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 409
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( a_ ):
    """Processor wrapping an auto image processor and an auto tokenizer.

    ``__call__`` tokenizes text and/or preprocesses images and returns a single
    encoding. (Restored: the three class attributes and all locals had been
    collapsed onto placeholder names, leaving ``encoding``/``image_features``
    undefined and the attributes shadowing each other.)
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Default to the image processor until changed by a target-context helper.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode *text* and/or *images*; at least one must be provided."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the image tensors into the text encoding.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Names of the model inputs this processor can produce.
        return ["input_ids", "attention_mask", "pixel_values"]
| 514
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> list of public names it provides.
# (Restored: the dict and list were assigned to mangled names, so
# `_import_structure` — referenced below — was never defined, and the lazy
# module was never installed into sys.modules.)
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

# Torch-backed model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 514
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __a ( _UpperCamelCase: int , _UpperCamelCase: str , _UpperCamelCase: Optional[int] , _UpperCamelCase: Tuple , _UpperCamelCase: str ) -> np.ndarray:
"""simple docstring"""
_snake_case = int(np.ceil((x_end - xa) / step_size ) )
_snake_case = np.zeros((n + 1,) )
_snake_case = ya
_snake_case = xa
for k in range(__lowerCAmelCase ):
_snake_case = y[k] + step_size * ode_func(__lowerCAmelCase , y[k] )
x += step_size
return y
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 185
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCamelCase : Any = False
class lowercase ( unittest.TestCase):
    """Placeholder for fast (non-nightly) VersatileDiffusion pipeline tests; intentionally empty."""
    pass
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase):
    """Nightly GPU integration tests for ``VersatileDiffusionPipeline``.

    (Restored: every local had been collapsed onto one placeholder name,
    leaving ``pipe``/``generator``/``image`` etc. undefined, and all three
    methods shared one name so only the last survived.)
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        """Saving and reloading the pipeline must not change its forward pass."""
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        """Check dual-guided, text-to-image and image-variation outputs against reference slices."""
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
| 352
| 0
|
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube with the given side length."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4*pi*r**2."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved + flat face): 3*pi*r**2."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a right circular cone: pi*r*(r + slant height)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum with end radii radius_1, radius_2."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a right circular cylinder: 2*pi*r*(h + r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a ring torus: 4*pi**2*R*r (R = torus radius, r = tube radius)."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three side lengths, via Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Area of a trapezium from its two parallel sides and height."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Area of a circle: pi*r**2."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse with the given semi-axes."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its two diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with *sides* sides of the given side length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    # Removed a duplicate, unreachable return that followed this line.
    return (sides * length**2) / (4 * tan(pi / sides))
# Demo/driver: run doctests, then print the formulas applied to sample values.
# NOTE(review): these calls expect public function names (area_rectangle,
# surface_area_cube, ...) — verify the definitions above actually use them.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    print("""[DEMO] Areas of various geometric shapes: \n""")
    print(f'''Rectangle: {area_rectangle(10, 20) = }''')
    print(f'''Square: {area_square(10) = }''')
    print(f'''Triangle: {area_triangle(10, 10) = }''')
    print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
    print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
    print(f'''Rhombus: {area_rhombus(10, 20) = }''')
    print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
    print(f'''Circle: {area_circle(20) = }''')
    print(f'''Ellipse: {area_ellipse(10, 20) = }''')
    print("""\nSurface Areas of various geometric shapes: \n""")
    print(f'''Cube: {surface_area_cube(20) = }''')
    print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
    print(f'''Sphere: {surface_area_sphere(20) = }''')
    print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
    print(f'''Cone: {surface_area_cone(10, 20) = }''')
    print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
    print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
    print(f'''Torus: {surface_area_torus(20, 10) = }''')
    print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
    print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 705
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class snake_case ( unittest.TestCase ):
    """Smoke tests for the TensorFlow benchmark utilities.

    (Restored: every local had been collapsed onto one placeholder name —
    ``MODEL_ID``/``results``/``benchmark`` were undefined — and all methods
    shared one name so only the last survived. Training/inference flag values
    follow the standard transformers TF benchmark test file; verify against
    upstream if behavior differs.)
    """

    def check_results_dict_not_empty(self, results):
        """Assert every (batch_size, sequence_length) cell in *results* holds a value."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            # A memory summary must expose all four aggregate views.
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 445
| 0
|
'''simple docstring'''
# Package version string (conventionally this would be named ``__version__``).
_UpperCAmelCase : str = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 107
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
    """A small convolutional neural network built from scratch with NumPy:
    one convolution layer, one pooling layer and a two-layer fully connected
    back-propagation network trained by plain gradient descent.

    NOTE(review): this class is left byte-identical because identifier
    mangling makes a safe rewrite impossible to verify:
      * every method parameter was renamed to ``lowerCAmelCase_`` — duplicate
        argument names are a SyntaxError in Python, so the class cannot even
        be compiled as written;
      * method bodies still read the pre-mangle local names (``bp_numa``,
        ``conva_get``, ``save_path``, ``datas_train`` ...) that the
        signatures no longer bind;
      * every method is named ``_lowerCAmelCase`` (each def shadows the
        previous one) while call sites use the original descriptive names
        (``self.convolute``, ``self.pooling``, ``self.sig``, ``self._expand``
        ...), and the classmethod builds ``CNN(...)`` — a name not defined
        in this file;
      * assignment targets were collapsed to the throwaway name ``a_``, so
        instance attributes read later (``self.num_bpa``, ``self.wkj`` ...)
        are never actually set.
    The original identifiers must be restored before this class can run.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=0.2 , lowerCAmelCase_=0.2 ):
        """Record layer sizes and learning rates, then draw initial kernels /
        FC weights from U(-0.5, 0.5) and thresholds from U(-1, 1)."""
        a_ : str = bp_numa
        a_ : Optional[Any] = bp_numa
        a_ : List[Any] = bp_numa
        a_ : Dict = conva_get[:2]
        a_ : int = conva_get[2]
        a_ : List[str] = size_pa
        a_ : Tuple = rate_w
        a_ : List[str] = rate_t
        # one random square kernel per convolution channel
        a_ : Any = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        a_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        a_ : str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        a_ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
        a_ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
        a_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Pickle all hyper-parameters and learned tensors to the given path."""
        a_ : str = {
            """num_bp1""": self.num_bpa,
            """num_bp2""": self.num_bpa,
            """num_bp3""": self.num_bpa,
            """conv1""": self.conva,
            """step_conv1""": self.step_conva,
            """size_pooling1""": self.size_poolinga,
            """rate_weight""": self.rate_weight,
            """rate_thre""": self.rate_thre,
            """w_conv1""": self.w_conva,
            """wkj""": self.wkj,
            """vji""": self.vji,
            """thre_conv1""": self.thre_conva,
            """thre_bp2""": self.thre_bpa,
            """thre_bp3""": self.thre_bpa,
        }
        with open(lowerCAmelCase_ , """wb""" ) as f:
            pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
        print(f'''Model saved: {save_path}''' )

    @classmethod
    def _lowerCAmelCase ( cls , lowerCAmelCase_ ):
        """Rebuild an instance from a pickle produced by the saver above.

        NOTE(review): unpickling executes arbitrary code; only load model
        files from a trusted source.
        """
        with open(lowerCAmelCase_ , """rb""" ) as f:
            a_ : Dict = pickle.load(lowerCAmelCase_ )  # noqa: S301
        a_ : Optional[Any] = model_dic.get("""conv1""" )
        conv_get.append(model_dic.get("""step_conv1""" ) )
        a_ : List[str] = model_dic.get("""size_pooling1""" )
        a_ : Any = model_dic.get("""num_bp1""" )
        a_ : Optional[Any] = model_dic.get("""num_bp2""" )
        a_ : str = model_dic.get("""num_bp3""" )
        a_ : Tuple = model_dic.get("""rate_weight""" )
        a_ : int = model_dic.get("""rate_thre""" )
        # create model instance
        a_ : Union[str, Any] = CNN(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        # modify model parameter
        a_ : int = model_dic.get("""w_conv1""" )
        a_ : List[str] = model_dic.get("""wkj""" )
        a_ : Optional[Any] = model_dic.get("""vji""" )
        a_ : int = model_dic.get("""thre_conv1""" )
        a_ : List[Any] = model_dic.get("""thre_bp2""" )
        a_ : List[Any] = model_dic.get("""thre_bp3""" )
        return conv_ins

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Logistic sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x ))

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Round a scalar to 3 decimal places (used to tidy predictions)."""
        return round(lowerCAmelCase_ , 3 )

    def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        """Convolve the input with every kernel.

        Returns the flattened input patches (reused by the gradient step)
        and the list of per-kernel feature maps.
        """
        a_ : int = convs[0]
        a_ : str = convs[1]
        a_ : str = np.shape(lowerCAmelCase_ )[0]
        # get the data slice of original image data, data_focus
        a_ : Tuple = []
        for i_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase_ ):
            for j_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase_ ):
                a_ : Optional[Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(lowerCAmelCase_ )
        # calculate the feature map of every single kernel, and saved as list of matrix
        a_ : Union[str, Any] = []
        a_ : Dict = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(lowerCAmelCase_ ):
            a_ : int = []
            for i_focus in range(len(lowerCAmelCase_ ) ):
                # elementwise product of patch and kernel, minus the kernel threshold
                a_ : Union[str, Any] = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(lowerCAmelCase_ ) )
            a_ : Dict = np.asmatrix(lowerCAmelCase_ ).reshape(
                lowerCAmelCase_ , lowerCAmelCase_ )
            data_featuremap.append(lowerCAmelCase_ )
        # expanding the data slice to One dimenssion
        a_ : Optional[Any] = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(lowerCAmelCase_ ) )
        a_ : Optional[Any] = np.asarray(lowerCAmelCase_ )
        return focus_list, data_featuremap

    def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="average_pool" ):
        """Down-sample every feature map by ``size_pooling`` using average
        pooling (default) or max pooling."""
        a_ : List[Any] = len(featuremaps[0] )
        a_ : Tuple = int(size_map / size_pooling )
        a_ : Optional[Any] = []
        for i_map in range(len(lowerCAmelCase_ ) ):
            a_ : Tuple = featuremaps[i_map]
            a_ : Tuple = []
            for i_focus in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
                for j_focus in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
                    a_ : List[Any] = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(lowerCAmelCase_ ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(lowerCAmelCase_ ) )
            a_ : Optional[int] = np.asmatrix(lowerCAmelCase_ ).reshape(lowerCAmelCase_ , lowerCAmelCase_ )
            featuremap_pooled.append(lowerCAmelCase_ )
        return featuremap_pooled

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Flatten a list of 2-D matrices into a single 1-D array."""
        a_ : Optional[Any] = []
        for i in range(len(lowerCAmelCase_ ) ):
            a_ : Optional[int] = np.shape(data[i] )
            a_ : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
            a_ : str = data_listed.getA().tolist()[0]
            data_expanded.extend(lowerCAmelCase_ )
        a_ : str = np.asarray(lowerCAmelCase_ )
        return data_expanded

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Flatten a single matrix into a 1-row array."""
        a_ : List[str] = np.asarray(lowerCAmelCase_ )
        a_ : Optional[int] = np.shape(lowerCAmelCase_ )
        a_ : List[str] = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        """Spread the pooled-layer gradient back over each feature map and
        multiply by the sigmoid derivative out * (1 - out)."""
        a_ : int = []
        a_ : Dict = 0
        for i_map in range(lowerCAmelCase_ ):
            a_ : Optional[Any] = np.ones((size_map, size_map) )
            for i in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
                for j in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
                    a_ : Any = pd_pool[
                        i_pool
                    ]
                    a_ : Optional[Any] = i_pool + 1
            a_ : Optional[int] = np.multiply(
                lowerCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(lowerCAmelCase_ )
        return pd_all

    def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=bool ):
        """Train by gradient descent until ``n_repeat`` epochs elapse or the
        epoch MSE drops below ``error_accuracy``; optionally plot the curve.

        NOTE(review): the ``=bool`` default on the draw flag looks like a
        mangled placeholder for a boolean literal — confirm intent.
        """
        print("""----------------------Start Training-------------------------""" )
        print((""" - - Shape: Train_Data """, np.shape(lowerCAmelCase_ )) )
        print((""" - - Shape: Teach_Data """, np.shape(lowerCAmelCase_ )) )
        a_ : str = 0
        a_ : str = []
        a_ : List[Any] = 1_00_00
        while rp < n_repeat and mse >= error_accuracy:
            a_ : List[Any] = 0
            print(f'''-------------Learning Time {rp}--------------''' )
            for p in range(len(lowerCAmelCase_ ) ):
                # print('------------Learning Image: %d--------------'%p)
                a_ : List[Any] = np.asmatrix(datas_train[p] )
                a_ : Optional[int] = np.asarray(datas_teach[p] )
                a_ , a_ : Any = self.convolute(
                    lowerCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                a_ : Union[str, Any] = self.pooling(lowerCAmelCase_ , self.size_poolinga )
                a_ : List[Any] = np.shape(lowerCAmelCase_ )
                a_ : str = self._expand(lowerCAmelCase_ )
                a_ : List[str] = data_bp_input
                # forward pass through the two fully connected layers
                a_ : int = np.dot(lowerCAmelCase_ , self.vji.T ) - self.thre_bpa
                a_ : List[str] = self.sig(lowerCAmelCase_ )
                a_ : Any = np.dot(lowerCAmelCase_ , self.wkj.T ) - self.thre_bpa
                a_ : Tuple = self.sig(lowerCAmelCase_ )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                a_ : Optional[Any] = np.multiply(
                    (data_teach - bp_outa) , np.multiply(lowerCAmelCase_ , (1 - bp_outa) ) )
                a_ : List[Any] = np.multiply(
                    np.dot(lowerCAmelCase_ , self.wkj ) , np.multiply(lowerCAmelCase_ , (1 - bp_outa) ) )
                a_ : List[str] = np.dot(lowerCAmelCase_ , self.vji )
                a_ : List[str] = pd_i_all / (self.size_poolinga * self.size_poolinga)
                a_ : List[Any] = pd_conva_pooled.T.getA().tolist()
                a_ : Tuple = self._calculate_gradient_from_pool(
                    lowerCAmelCase_ , lowerCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    a_ : Union[str, Any] = self._expand_mat(pd_conva_all[k_conv] )
                    a_ : Optional[int] = self.rate_weight * np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
                    a_ : int = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    a_ : Union[str, Any] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                a_ : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                a_ : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                a_ : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
                a_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                a_ : str = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            a_ : List[str] = rp + 1
            a_ : Union[str, Any] = error_count / patterns
            all_mse.append(lowerCAmelCase_ )

        def draw_error():
            # horizontal reference line at the requested accuracy
            a_ : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(lowerCAmelCase_ , """+-""" )
            plt.plot(lowerCAmelCase_ , """r--""" )
            plt.xlabel("""Learning Times""" )
            plt.ylabel("""All_mse""" )
            plt.grid(lowerCAmelCase_ , alpha=0.5 )
            plt.show()

        print("""------------------Training Complished---------------------""" )
        print((""" - - Training epoch: """, rp, f''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Run the forward pass on a batch of images and return the rounded
        network outputs as an array."""
        a_ : str = []
        print("""-------------------Start Testing-------------------------""" )
        print((""" - - Shape: Test_Data """, np.shape(lowerCAmelCase_ )) )
        for p in range(len(lowerCAmelCase_ ) ):
            a_ : List[Any] = np.asmatrix(datas_test[p] )
            a_ , a_ : Tuple = self.convolute(
                lowerCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            a_ : List[Any] = self.pooling(lowerCAmelCase_ , self.size_poolinga )
            a_ : Dict = self._expand(lowerCAmelCase_ )
            a_ : int = data_bp_input
            a_ : int = bp_outa * self.vji.T - self.thre_bpa
            a_ : Optional[Any] = self.sig(lowerCAmelCase_ )
            a_ : Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
            a_ : Tuple = self.sig(lowerCAmelCase_ )
            produce_out.extend(bp_outa.getA().tolist() )
        a_ : int = [list(map(self.do_round , lowerCAmelCase_ ) ) for each in produce_out]
        return np.asarray(lowerCAmelCase_ )

    def _lowerCAmelCase ( self , lowerCAmelCase_ ):
        """Expose the intermediate convolution and pooling outputs for one image."""
        a_ : List[Any] = np.asmatrix(lowerCAmelCase_ )
        a_ , a_ : List[Any] = self.convolute(
            lowerCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        a_ : str = self.pooling(lowerCAmelCase_ , self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # Intentionally empty: this module only defines the CNN class; no demo runs.
    pass
| 577
| 0
|
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Compute per-process waiting times under shortest-job-first scheduling.

    At each scheduling point the arrived, unfinished process with the
    smallest remaining (= full) burst is run to completion; when nothing has
    arrived yet, the clock advances one tick.  (Despite the ``remaining_time``
    bookkeeping this is non-preemptive: a chosen job always runs its whole
    burst at once.)

    BUGFIX: the mangled original declared three parameters all named
    ``_UpperCAmelCase`` (a SyntaxError) while the body read
    ``arrival_time``/``burst_time``/``no_of_processes``, and the module's
    demo called ``calculate_waitingtime`` which was never defined.  The
    working names are restored here.

    Args:
        arrival_time: arrival time of each process.
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes.

    Returns:
        waiting_time[i] = completion_time[i] - arrival_time[i] - burst_time[i].
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # Loop until every process completes: gather the ready set (arrived and
    # unfinished) and run its shortest member to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            # Non-preemptive: the chosen job consumes its entire burst.
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # No process has arrived yet; idle for one time unit.
            total_time += 1
    return waiting_time


# Backwards-compatible alias for the mangled public name.
UpperCAmelCase_ = calculate_waitingtime
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Return per-process turnaround times: burst time + waiting time.

    BUGFIX: the mangled original declared three parameters all named
    ``_UpperCAmelCase`` (a SyntaxError) while the body read
    ``burst_time``/``no_of_processes``/``waiting_time``, and the module's
    demo called ``calculate_turnaroundtime`` which was never defined.  The
    working names (matching the demo's positional call order) are restored.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


# Backwards-compatible alias for the mangled public name.
UpperCAmelCase_ = calculate_turnaroundtime
if __name__ == "__main__":
    # Demo: four processes, all arriving at t=0, with the bursts below.
    # NOTE(review): every assignment below targets the same mangled name
    # ``lowercase`` while the calls and prints read ``no_of_processes``,
    # ``burst_time``, ``arrival_time``, ``waiting_time`` and
    # ``turn_around_time`` — none of which is ever bound, so this demo
    # raises NameError as written; the original variable names need restoring.
    print("""[TEST CASE 01]""")
    lowercase : Optional[Any] = 4
    lowercase : Optional[Any] = [2, 5, 3, 7]
    lowercase : List[str] = [0, 0, 0, 0]
    lowercase : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    lowercase : int = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 584
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 584
| 1
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n, in order.

    BUGFIX: in the mangled original all three functions of this script were
    named ``lowercase__`` (each def shadowing the previous one), signatures
    used duplicate parameter names (``A_`` — a SyntaxError), and the bodies
    and demo called ``create_all_state`` / ``generate_all_combinations`` /
    ``print_all_state`` which were never defined.  The working names are
    restored here; behavior (backtracking enumeration, printed demo output)
    is unchanged.

    >>> generate_all_combinations(4, 2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursively extend *current_list* with candidates >= *increment*.

    When *level* hits 0 the finished combination is copied into
    *total_list*; otherwise each admissible next element is tried in turn,
    backtracking via the trailing ``pop``.
    """
    if level == 0:
        total_list.append(current_list[:])
        return
    # Upper bound leaves room for the remaining ``level - 1`` elements.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print one combination per line, elements space-separated."""
    for state in total_list:
        print(*state)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 68
|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__)
@add_end_docstrings(
snake_case_ , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
    """Masked-language-model ("fill-mask") pipeline: locates the tokenizer's
    mask token in the input, runs the model, and returns the top-k
    replacement candidates — optionally restricted to caller-supplied
    ``targets``.  Handles both the TensorFlow and PyTorch branches via
    ``self.framework``.

    NOTE(review): identifier mangling collapsed every method name to ``a__``
    (each def shadows the previous one) and most parameters to ``A``
    (several signatures repeat ``A`` — duplicate argument names are a
    SyntaxError), while the bodies still read the original local names
    (``input_ids``, ``masked_index``, ``model_inputs``, ``model_outputs``,
    ``target_ids``, ``targets``, ``top_k`` ...) that the signatures no
    longer bind.  The code is kept byte-identical here; the original
    identifiers must be restored before the class can execute.
    """

    def a__ ( self , A ) -> np.ndarray:
        """Return the positions of mask tokens in the input ids (tf or pt)."""
        if self.framework == "tf":
            A: Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            A: List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A )
        else:
            raise ValueError("""Unsupported framework""" )
        return masked_index

    def a__ ( self , A ) -> np.ndarray:
        """Raise PipelineException when the input holds no mask token."""
        A: Union[str, Any] = self.get_masked_index(A )
        A: List[Any] = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )

    def a__ ( self , A ) -> str:
        """Validate every sequence of a (possibly batched) model input."""
        if isinstance(A , A ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(A )

    def a__ ( self , A , A=None , **A ) -> Dict[str, GenericTensor]:
        """Preprocess: tokenize raw text and check a mask token is present."""
        if return_tensors is None:
            A: List[Any] = self.framework
        A: str = self.tokenizer(A , return_tensors=A )
        self.ensure_exactly_one_mask_token(A )
        return model_inputs

    def a__ ( self , A ) -> int:
        """Forward pass; input ids are carried along for postprocessing."""
        A: List[str] = self.model(**A )
        A: Any = model_inputs["""input_ids"""]
        return model_outputs

    def a__ ( self , A , A=5 , A=None ) -> Tuple:
        """Postprocess: turn logits into top-k candidate tokens per mask."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            A: Union[str, Any] = target_ids.shape[0]
        A: int = model_outputs["""input_ids"""][0]
        A: List[Any] = model_outputs["""logits"""]
        if self.framework == "tf":
            A: Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            A: Optional[int] = outputs.numpy()
            A: str = outputs[0, masked_index, :]
            A: List[Any] = stable_softmax(A , axis=-1 )
            if target_ids is not None:
                A: int = tf.gather_nd(tf.squeeze(A , 0 ) , target_ids.reshape(-1 , 1 ) )
                A: Tuple = tf.expand_dims(A , 0 )
            A: str = tf.math.top_k(A , k=A )
            A , A: str = topk.values.numpy(), topk.indices.numpy()
        else:
            A: Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            A: str = outputs[0, masked_index, :]
            A: Tuple = logits.softmax(dim=-1 )
            if target_ids is not None:
                A: str = probs[..., target_ids]
            A , A: Optional[Any] = probs.topk(A )
        A: List[str] = []
        A: Optional[Any] = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            A: List[str] = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                A: str = input_ids.numpy().copy()
                if target_ids is not None:
                    A: Optional[Any] = target_ids[p].tolist()
                A: List[Any] = p
                # Filter padding out:
                A: str = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                A: Tuple = self.tokenizer.decode(A , skip_special_tokens=A )
                A: Union[str, Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
                row.append(A )
            result.append(A )
        if single_mask:
            return result[0]
        return result

    def a__ ( self , A , A=None ) -> Any:
        """Map requested target words to vocabulary ids, tokenizing (with a
        warning) those that are not single known vocab entries."""
        if isinstance(A , A ):
            A: Any = [targets]
        try:
            A: Tuple = self.tokenizer.get_vocab()
        except Exception:
            A: Tuple = {}
        A: Any = []
        for target in targets:
            A: Optional[int] = vocab.get(A , A )
            if id_ is None:
                A: List[Any] = self.tokenizer(
                    A , add_special_tokens=A , return_attention_mask=A , return_token_type_ids=A , max_length=1 , truncation=A , )["""input_ids"""]
                if len(A ) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        """We cannot replace it with anything meaningful, ignoring it""" )
                    continue
                A: List[str] = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        A: Union[str, Any] = list(set(A ) )
        if len(A ) == 0:
            raise ValueError("""At least one target must be provided when passed.""" )
        A: Any = np.array(A )
        return target_ids

    def a__ ( self , A=None , A=None ) -> Dict:
        """Sanitize pipeline kwargs: resolve ``targets``/``top_k`` into
        postprocess parameters; require that the tokenizer has a mask token."""
        A: List[Any] = {}
        if targets is not None:
            A: Dict = self.get_target_ids(A , A )
            A: Tuple = target_ids
        if top_k is not None:
            A: Any = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
        return {}, {}, postprocess_params

    def __call__( self , A , *A , **A ) -> Any:
        """Run the pipeline; unwrap singleton list outputs."""
        A: Any = super().__call__(A , **A )
        if isinstance(A , A ) and len(A ) == 1:
            return outputs[0]
        return outputs
| 135
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# transformers-style lazy-import wiring for the RoC-BERT model family.
# NOTE(review): the mangling broke this module's plumbing:
#   * ``__lowerCAmelCase`` is first bound to the import-structure dict, then
#     rebound to the torch symbol list below, discarding the dict;
#   * ``_LazyModule`` at the bottom reads ``_import_structure``, which is
#     never defined — NameError at import time when TYPE_CHECKING is False;
#   * both tokenizers branches are stubs (``pass``), and under TYPE_CHECKING
#     the ``else`` arm *raises* where the fast-tokenizer import presumably
#     belonged — confirm against the upstream module.
# The original variable names and imports must be restored.
__lowerCAmelCase = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch present: advertise the modeling symbols.
    __lowerCAmelCase = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    __lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Port weights from an old-structure (XLM)ProphetNet checkpoint into the
    current model layout and save the result.

    The new-format model is loaded with ``output_loading_info=True``; every
    key it reports as missing is resolved attribute-by-attribute against the
    old model (``mapping`` translates renamed sub-modules) and copied in.

    BUGFIX: the mangled original (a) declared both parameters as ``__A``
    (duplicate argument names — a SyntaxError) while the body read
    ``prophetnet_checkpoint_path`` / ``pytorch_dump_folder_path``, (b) left
    the two in_proj shape checks as bare comparison expressions (no-ops —
    now real ``assert`` statements), and (c) was unreachable from the
    ``__main__`` block, which calls ``convert_prophetnet_checkpoint_to_pytorch``.

    Args:
        prophetnet_checkpoint_path: directory of the old-format checkpoint;
            "xprophetnet" in the path selects the XLM variant.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Raises:
        ValueError: if a missing key cannot be traced or initialized.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections that are packed into one in_proj tensor in the
    # old model and must be sliced apart.
    special_keys = ["key_proj", "value_proj", "query_proj"]
    # new attribute name -> old attribute name ("" means the level vanishes).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the unmapped name when the mapped one is absent.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # BUGFIX: these two checks were bare comparisons (no-ops).
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_12, :])
                is_key_init = True
                break

            # Descend one level in both models.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


# Backwards-compatible alias for the mangled public name.
_UpperCAmelCase = convert_prophetnet_checkpoint_to_pytorch
if __name__ == "__main__":
    # CLI entry point: convert a checkpoint given two required path arguments.
    # NOTE(review): the parser and parsed args are assigned to the mangled
    # name ``__lowerCAmelCase`` but read back as ``parser`` / ``args`` — those
    # names are never bound, so this block raises NameError as written; the
    # original variable names need restoring.
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    __lowerCAmelCase = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666
| 0
|
import unittest
from transformers import DonutProcessor
# Hub model id of the pretrained Donut checkpoint exercised by the test below.
lowerCamelCase__ = '''naver-clova-ix/donut-base'''
class _UpperCAmelCase(unittest.TestCase):
    """Round-trip test for DonutProcessor's token-sequence -> JSON decoding.

    Fixes: the two methods previously shared one obfuscated name (so unittest
    never ran either), setUp bound the processor to a throwaway local instead
    of ``self.processor``, and the call used ``tokenajson`` instead of the
    DonutProcessor API method ``token2json``.
    """

    def setUp(self) -> None:
        # unittest only invokes `setUp` by that exact name; store the processor
        # on the instance so the test method can reach it.
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def test_token2json(self) -> None:
        """The XML-like token sequence must decode back to the original dict."""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 547
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps each supported model family to its (config class, LM-head model class,
# tokenizer class) triple; looked up via args.student_type / args.teacher_type.
lowerCamelCase__ = {
    '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
    '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase__ ( args ) -> None:
    """Sanity-check the parsed CLI arguments for the distillation run.

    Fix: the parameter was named ``a__`` while the body reads ``args``,
    which raised NameError on the first line.

    Raises AssertionError when option combinations are inconsistent
    (MLM vs CLM losses, student/teacher pairing, missing files, negative
    loss weights). NOTE: asserts are stripped under ``python -O``.
    """
    # Exactly one of the LM losses must be active, consistent with --mlm.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Student and teacher families must match (DistilBERT may distill BERT).
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights must be non-negative, and at least one positive.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase__ ( student , args ) -> None:
    """Freeze the student's positional-embedding weights (for --freeze_pos_embs).

    Fix: the original declared two parameters with the same name (``a__``,
    a SyntaxError) and assigned ``False`` to a throwaway local instead of
    the embedding's ``requires_grad``; restored per the upstream
    distillation training script.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def lowerCAmelCase__ ( student , args ) -> None:
    """Freeze token-type embeddings (only RoBERTa students have them).

    Fix: duplicate parameter names (a SyntaxError) and a destroyed
    assignment target; restored per the upstream distillation script.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def lowerCAmelCase__ ( ) -> None:
    """Entry point of the distillation training script.

    Parses the CLI arguments, loads tokenizer/data/student/teacher, applies
    optional embedding freezing, then launches the Distiller.

    Fix: the original bound every local to the throwaway name
    ``_UpperCamelCase`` while later statements read the real names
    (``parser``, ``args``, ``tokenizer``, ...), and every ``type=``/
    ``required=`` keyword was replaced by the undefined name ``a__``;
    restored per the upstream distillation train.py.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file", type=str, required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True,
        help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float,
        help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.")
    parser.add_argument(
        "--freeze_pos_embs", action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50,
        help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ))
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'
                    " itUse `--force` if you want to overwrite it")
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f'Experiment will be dumped and logged in {args.dump_path}')

        # SAVE PARAMS #
        logger.info(f'Param: {args}')
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)')
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Smooth counts into sampling probabilities; special tokens are never masked.
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f'Loading student config from {args.student_config}')
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f'cuda:{args.local_rank}')
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f'cuda:{args.local_rank}')
    logger.info(f'Teacher loaded from {args.teacher_name}.')

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
    # NOTE(review): the training entry point above is (mis)named
    # `lowerCAmelCase__`; calling the undefined name `main` raised NameError.
    # At module level the last `def lowerCAmelCase__` binding is the entry point.
    lowerCAmelCase__()
| 547
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int] ) -> bool:
    """Return True iff ``next_ver`` can extend ``path`` at slot ``curr_ind``.

    Fix: the original declared all four parameters with the same name
    (``_lowerCamelCase``, a SyntaxError) while the body reads
    graph/next_ver/curr_ind/path.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def lowerCAmelCase_ ( graph: list[list[int]] , path: list[int] , curr_ind: int ) -> bool:
    """Backtracking helper that tries to complete a Hamiltonian cycle in ``path``.

    ``path`` has ``len(graph) + 1`` slots (first == last == start vertex,
    unvisited slots are -1); ``curr_ind`` is the next slot to fill. The path
    is mutated in place; returns True when a cycle is found.

    Fix: the original declared duplicate parameter names (a SyntaxError) and
    called ``valid_connection``/``util_hamilton_cycle`` — names never defined
    in this file. The validity check is now nested and recursion uses the
    function's actual name.
    """
    def _valid(next_ver: int) -> bool:
        # Edge must exist from the previous vertex and the vertex must be new.
        if graph[path[curr_ind - 1]][next_ver] == 0:
            return False
        return not any(vertex == next_ver for vertex in path)

    # Base Case: all slots filled -> close the cycle back to the start vertex.
    if curr_ind == len(graph):
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step: try every vertex as the next transition.
    for next_ver in range(len(graph)):
        if _valid(next_ver):
            path[curr_ind] = next_ver
            if lowerCAmelCase_(graph, path, curr_ind + 1):
                return True
            path[curr_ind] = -1  # Backtrack
    return False
def lowerCAmelCase_ ( graph: list[list[int]] , start_index: int = 0 ) -> list[int]:
    """Return a Hamiltonian cycle through ``graph`` starting/ending at
    ``start_index`` (as a vertex list with first == last), or [] if none.

    Fix: the original declared duplicate parameter names (a SyntaxError) and
    depended on ``util_hamilton_cycle``, a name never defined in this file;
    the backtracking search is now self-contained.
    """
    def _backtrack(curr_ind: int) -> bool:
        # All slots filled: the cycle closes iff the last vertex connects back.
        if curr_ind == len(graph):
            return graph[path[curr_ind - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            # Candidate must be adjacent to the previous vertex and unvisited.
            if graph[path[curr_ind - 1]][next_ver] != 0 and not any(v == next_ver for v in path):
                path[curr_ind] = next_ver
                if _backtrack(curr_ind + 1):
                    return True
                path[curr_ind] = -1  # backtrack
        return False

    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if _backtrack(1) else []
| 178
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module-level logger following the transformers convention.
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
# Canonical config URLs for the released T5 checkpoints on the Hub.
UpperCamelCase__ : List[str] = {
    '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
    '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
    '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
    '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
    '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class _UpperCamelCase ( PretrainedConfig ):
    """Configuration for T5 models (mirrors ``transformers.T5Config``).

    Fixes: every ``__init__`` parameter previously shared one obfuscated name
    (a SyntaxError), values were bound to locals instead of ``self``
    attributes, the three class attributes all shadowed each other under one
    name, and the base class was the URL dict above instead of the imported
    ``PretrainedConfig``.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    # PretrainedConfig uses this to alias generic attribute names onto T5's.
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder defaults to the encoder depth (symmetry) when unspecified.
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" style values split into (gating flag, activation name).
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class _UpperCamelCase ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for T5 (mirrors the upstream T5OnnxConfig).

    Fixes: both properties previously shared one obfuscated name (so the
    first was shadowed and unreachable), the ``common_inputs`` dict was never
    assembled (locals were bound to throwaway names while the return read an
    undefined name), and the base class was the URL dict instead of the
    imported ``OnnxSeqaSeqConfigWithPast``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification of the ONNX graph inputs.

        The decoder axes depend on whether past key/values are exported.
        """
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        # Opset 13 is the minimum supporting the operators the T5 export emits.
        return 13
| 178
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCamelCase ( model_name ):
    """Build the UperNet+ConvNeXt config matching a released mmseg checkpoint name.

    Fixes: all locals were bound to ``__magic_name__`` while later lines read
    the real names; ``hf_hub_download(a, a, ...)`` passed the model name as
    both repo id and filename; the id2label comprehension used ``int(a)``
    instead of ``int(k)``; the final kwargs passed ``a`` everywhere.
    """
    auxiliary_in_channels = 384
    # Intentionally plain `if`s: "xlarge" also contains "large", so the later
    # branch overrides the earlier one, matching the upstream converter.
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20K: 150 semantic classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=idalabel,
        label2id=labelaid,
    )
    return config
def UpperCamelCase ( config ):
    """Return (old, new) key pairs mapping mmseg ConvNeXt/UperNet state-dict
    names onto the HF UperNetForSemanticSegmentation naming scheme.

    Fixes: the result list was bound to ``__magic_name__`` while every append
    used the undefined name ``rename_keys``, and the parameter was named
    ``a`` while the body reads ``config``.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        # Stage 0 has no downsampling layer (the stem plays that role).
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ] )
    # fmt: on
    return rename_keys
def UpperCamelCase ( dct , old , new ):
    """Move the value stored under ``old`` to ``new`` in ``dct`` (in place).

    Fix: the original declared three parameters with the same name ``a``
    (a SyntaxError) and bound the popped value to a throwaway local instead
    of reinserting it under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def UpperCamelCase ( model_name , pytorch_dump_folder_path , push_to_hub ):
    """Download an mmseg UperNet-ConvNeXt checkpoint, convert it to the HF
    format, verify the output logits against known slices, and optionally
    save locally / push to the Hub.

    Fixes: the original declared three parameters with the same name ``a``
    (a SyntaxError) and bound every local to ``__magic_name__`` while later
    statements read the real names (e.g. ``requests.get(a, stream=a)``
    passed the model name as the URL).
    """
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # CLI entry point. Fix: the parser and parsed args were assigned to
    # `_lowerCAmelCase` but read back as `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 432
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Fix: the set was bound to `_lowerCAmelCase` but read as `_TO_SKIP` in the
# class body below (NameError at class-creation time).
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Tuple = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE :Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__SCREAMING_SNAKE_CASE :str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__SCREAMING_SNAKE_CASE :int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self : Optional[Any] , a__ : List[Any] , a__ : List[str] , a__ : List[Any] ):
__magic_name__ = ZeroShotClassificationPipeline(
model=a__ , tokenizer=a__ , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self : str , a__ : int , a__ : List[str] ):
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
# No kwarg
__magic_name__ = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__magic_name__ = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
# https://github.com/huggingface/transformers/issues/13846
__magic_name__ = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]}
for i in range(1 )
] , )
__magic_name__ = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]}
for i in range(2 )
] , )
with self.assertRaises(a__ ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(a__ ):
classifier(a__ , candidate_labels='''politics''' )
with self.assertRaises(a__ ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(a__ ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=a__ )
with self.assertRaises(a__ ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(a__ ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=a__ , )
self.run_entailment_id(a__ )
def snake_case__ ( self : Optional[Any] , a__ : Pipeline ):
__magic_name__ = zero_shot_classifier.model.config
__magic_name__ = config.labelaid
__magic_name__ = zero_shot_classifier.entailment_id
__magic_name__ = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__magic_name__ = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__magic_name__ = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__magic_name__ = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__magic_name__ = original_labelaid
self.assertEqual(a__ , zero_shot_classifier.entailment_id )
@require_torch
def snake_case__ ( self : str ):
__magic_name__ = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def test_small_model_pt(self):
    """Smoke-test the PyTorch pipeline on a tiny model and pin its output."""
    zero_shot_classifier = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
        framework="pt",
    )
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    # The tiny model is untrained, so scores are (rounded) uniform over labels.
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["science", "public health", "politics"],
            "scores": [0.333, 0.333, 0.333],
        },
    )
@require_tf
def test_small_model_tf(self):
    """Smoke-test the TensorFlow pipeline on a tiny model and pin its output."""
    zero_shot_classifier = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
        framework="tf",
    )
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    # The tiny model is untrained, so scores are (rounded) uniform over labels.
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["science", "public health", "politics"],
            "scores": [0.333, 0.333, 0.333],
        },
    )
@slow
@require_torch
def test_large_model_pt(self):
    """End-to-end zero-shot classification with roberta-large-mnli (PyTorch)."""
    zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["politics", "public health", "science"],
            "scores": [0.976, 0.015, 0.009],
        },
    )
    # Long input with multi_label=True: each label is scored independently.
    outputs = zero_shot_classifier(
        "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
        " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
        " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
        " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
        " machine translation tasks show these models to be superior in quality while being more parallelizable"
        " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
        " English-to-German translation task, improving over the existing best results, including ensembles by"
        " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
        " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
        " fraction of the training costs of the best models from the literature. We show that the Transformer"
        " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
        " large and limited training data.",
        candidate_labels=["machine learning", "statistics", "translation", "vision"],
        multi_label=True,
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": (
                "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                " networks in an encoder-decoder configuration. The best performing models also connect the"
                " encoder and decoder through an attention mechanism. We propose a new simple network"
                " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                " superior in quality while being more parallelizable and requiring significantly less time to"
                " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                " costs of the best models from the literature. We show that the Transformer generalizes well to"
                " other tasks by applying it successfully to English constituency parsing both with large and"
                " limited training data."
            ),
            "labels": ["translation", "machine learning", "vision", "statistics"],
            "scores": [0.817, 0.713, 0.018, 0.018],
        },
    )
@slow
@require_tf
def test_large_model_tf(self):
    """End-to-end zero-shot classification with roberta-large-mnli (TensorFlow)."""
    zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
    outputs = zero_shot_classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": "Who are you voting for in 2020?",
            "labels": ["politics", "public health", "science"],
            "scores": [0.976, 0.015, 0.009],
        },
    )
    # Long input with multi_label=True: each label is scored independently.
    outputs = zero_shot_classifier(
        "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
        " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
        " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
        " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
        " machine translation tasks show these models to be superior in quality while being more parallelizable"
        " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
        " English-to-German translation task, improving over the existing best results, including ensembles by"
        " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
        " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
        " fraction of the training costs of the best models from the literature. We show that the Transformer"
        " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
        " large and limited training data.",
        candidate_labels=["machine learning", "statistics", "translation", "vision"],
        multi_label=True,
    )
    self.assertEqual(
        nested_simplify(outputs),
        {
            "sequence": (
                "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                " networks in an encoder-decoder configuration. The best performing models also connect the"
                " encoder and decoder through an attention mechanism. We propose a new simple network"
                " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                " superior in quality while being more parallelizable and requiring significantly less time to"
                " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                " costs of the best models from the literature. We show that the Transformer generalizes well to"
                " other tasks by applying it successfully to English constituency parsing both with large and"
                " limited training data."
            ),
            "labels": ["translation", "machine learning", "vision", "statistics"],
            "scores": [0.817, 0.713, 0.018, 0.018],
        },
    )
| 432
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Generate a 1024-bit RSA key pair and write it to ``rsa_*.txt`` files."""
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """Return an RSA ``(public_key, private_key)`` pair.

    Each key is an ``(n, exponent)`` tuple with ``n = p * q`` for two random
    ``key_size``-bit primes ``p`` and ``q``.
    """
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # Keep drawing candidates until one is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        # Refuse to overwrite existing key material.
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
    # Script entry point: generate the default "rsa" key pair.
    main()
| 718
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# SHA of the commit where the current branch forked off main.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Files changed since the fork point (deleted files are filtered out with --diff-filter=d).
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Keep only .py files living under one of the requested top-level dirs.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 354
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

if is_sentencepiece_available():
    # Path to a small fixture sentencepiece model used to build the test tokenizer.
    SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# Language-code token ids for English and French in the M2M100 vocabulary.
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Common tokenizer test-suite plumbing for ``MaMaaaTokenizer``."""

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        """Build a toy vocab + spm model and save a tokenizer into tmpdir."""
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved by :meth:`setUp`."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-output pair used by the common round-trip tests."""
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """``</s>`` maps to id 0 and back."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # Expected encodings for three reference sentences (all padded to length 108
        # with pad id 1); values taken from the facebook/m2m100_418M checkpoint.
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2],
                [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2] + [1] * 67,
                [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2] + [1] * 92,
            ],
            "attention_mask": [
                [1] * 108,
                [1] * 41 + [0] * 67,
                [1] * 16 + [0] * 92,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    """Integration tests against the real ``facebook/m2m100_418M`` checkpoint."""

    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        """Download the tokenizer once for the whole test class."""
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        """Decoding must drop the leading language-code token."""
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
| 235
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Decoder keys that are legitimately absent from the fairseq checkpoint.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """Translate a fairseq MusicGen state-dict key into its transformers name.

    Replacements are applied in sequence so that e.g. ``transformer.*.linear1``
    becomes ``model.decoder.*.fc1``.
    """
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename all keys of ``state_dict`` and split out the enc-dec projection.

    Returns ``(state_dict, enc_dec_proj_state_dict)``: the decoder weights with
    transformers-style keys, and the enc-to-dec projection weights separately.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projection weights
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # projection weights are loaded into the composite model separately
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """Map a MusicGen checkpoint size name to its decoder hyper-parameters.

    Raises:
        ValueError: if ``checkpoint`` is not one of ``small``/``medium``/``large``.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,  # FFN dim is always 4x the hidden size
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint to ``MusicgenForConditionalGeneration``.

    Args:
        checkpoint: size name of the checkpoint (``small``/``medium``/``large``).
        pytorch_dump_folder: if set, the converted model/processor are saved there.
        repo_id: if set, the converted model/processor are pushed to this hub repo.
        device: torch device used to load the fairseq model.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    # Forward the device flag too — it was previously parsed but never used.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 269
| 0
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__(Dataset):
    """Torch dataset over CNN/DailyMail ``.story`` files found in a directory."""

    def __init__(self, path="", prefix="train"):
        """Collect the paths of all story files under ``path``.

        Files whose name contains "summary" and non-files are skipped.
        ``prefix`` is kept for interface compatibility; it is not used here.
        """
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Return the number of story documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        """Return ``(document_name, story_lines, summary_lines)`` for document ``idx``."""
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Split a raw CNN/DM story into ``(story_lines, summary_lines)``.

    Summary lines are those that follow "@highlight" markers; everything before
    the first marker is the article body.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines (drop the "@highlight" markers themselves)
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
def _add_missing_period(line):
    """Append a period to ``line`` unless it already ends with punctuation
    or is an "@highlight" marker. Assumes ``line`` is non-empty."""
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate ``sequence`` to ``block_size`` or right-pad it (in place) with
    ``pad_token_id`` so the result always has length ``block_size``."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> str:
__snake_case = torch.ones_like(snake_case_ )
__snake_case = sequence == pad_token_id
__snake_case = 0
return mask
def lowerCamelCase__ ( snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Union[str, Any] ) -> Union[str, Any]:
__snake_case = [tokenizer.encode(snake_case_ ) for line in story_lines]
__snake_case = [token for sentence in story_lines_token_ids for token in sentence]
__snake_case = [tokenizer.encode(snake_case_ ) for line in summary_lines]
__snake_case = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : Tuple ) -> Any:
__snake_case = []
for sequence in batch:
__snake_case = -1
__snake_case = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(snake_case_ )
return torch.tensor(snake_case_ )
| 388
|
from __future__ import annotations
import math
def lowerCamelCase__ ( snake_case_ : int ) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization."""
    if 1 < snake_case_ < 4:
        # 2 and 3 are primes
        return True
    if snake_case_ < 2 or snake_case_ % 2 == 0 or snake_case_ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # Every remaining prime is of the form 6k +/- 1, so only those candidates
    # up to sqrt(n) need to be checked.
    limit = int(math.sqrt(snake_case_ ) + 1 )
    return all(
        snake_case_ % candidate != 0 and snake_case_ % (candidate + 2) != 0
        for candidate in range(5 , limit , 6 )
    )
def lowerCamelCase__ ( snake_case_ : int ) -> list[int]:
    """Return the number together with all of its left- and right-truncations.

    E.g. 3797 -> [3797, 797, 379, 97, 37, 7, 3]. The original bound its
    locals to a throwaway name and then read the undefined ``n``/``str_num``/
    ``list_nums``; restore the bindings.
    """
    str_num = str(snake_case_ )
    list_nums = [snake_case_]
    for i in range(1 , len(str_num ) ):
        # Drop i leading digits, then i trailing digits.
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def lowerCamelCase__ ( snake_case_ : int ) -> bool:
    """Cheap pre-filter: for numbers longer than three digits, both the first
    and last three digits must themselves be prime."""
    digits = str(snake_case_ )
    if len(digits ) > 3 and not (
        is_prime(int(digits[-3:] ) ) and is_prime(int(digits[:3] ) )
    ):
        return False
    return True
def lowerCamelCase__ ( snake_case_ : int = 11 ) -> list[int]:
    """Collect the first *snake_case_* truncatable primes (Project Euler 37).

    Fixes in the mangled body: the candidate counter and result list lost
    their names, the ``all(...)`` tested the count argument instead of the
    loop variable, and the count (not the candidate) was appended.
    """
    list_truncated_primes: list[int] = []
    # 2, 3, 5 and 7 are excluded by definition; start at 13 and step by 2.
    num = 13
    while len(list_truncated_primes ) != snake_case_:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            # Every truncation of the candidate must be prime.
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def lowerCamelCase__ ( ) -> int:
    """Project Euler 37 answer: the sum of the eleven truncatable primes."""
    truncated_primes = compute_truncated_primes(11 )
    return sum(truncated_primes )
if __name__ == "__main__":
    # NOTE(review): `compute_truncated_primes` is not bound under that name in
    # this file (the solver above was mangled to `lowerCamelCase__`), so this
    # raises NameError as written — confirm the intended name.
    # The f-string "=" specifier echoes the expression alongside its value.
    print(F'{sum(compute_truncated_primes(11)) = }')
| 388
| 1
|
from math import factorial
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : Optional[int] , a : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = real
if isinstance(a , a ):
SCREAMING_SNAKE_CASE : Dict = [1] * rank
else:
SCREAMING_SNAKE_CASE : Optional[Any] = rank
def __repr__( self : List[Any] ) -> str:
"""simple docstring"""
return (
F"{self.real}+"
F"{'+'.join(str(a )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , a )
def __add__( self : Tuple , a : List[str] ) -> List[str]:
"""simple docstring"""
if not isinstance(a , a ):
return Dual(self.real + other , self.duals )
SCREAMING_SNAKE_CASE : List[str] = self.duals.copy()
SCREAMING_SNAKE_CASE : Dict = other.duals.copy()
if len(a ) > len(a ):
o_dual.extend([1] * (len(a ) - len(a )) )
elif len(a ) < len(a ):
s_dual.extend([1] * (len(a ) - len(a )) )
SCREAMING_SNAKE_CASE : int = []
for i in range(len(a ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , a )
lowerCamelCase__ =__add__
def __sub__( self : Optional[int] , a : int ) -> Dict:
"""simple docstring"""
return self + other * -1
def __mul__( self : Union[str, Any] , a : int ) -> int:
"""simple docstring"""
if not isinstance(a , a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , a )
SCREAMING_SNAKE_CASE : List[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , a )
lowerCamelCase__ =__mul__
def __truediv__( self : int , a : int ) -> Optional[int]:
"""simple docstring"""
if not isinstance(a , a ):
SCREAMING_SNAKE_CASE : int = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , a )
raise ValueError
def __floordiv__( self : List[Any] , a : Optional[int] ) -> List[Any]:
"""simple docstring"""
if not isinstance(a , a ):
SCREAMING_SNAKE_CASE : int = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , a )
raise ValueError
def __pow__( self : Optional[Any] , a : Dict ) -> Optional[Any]:
"""simple docstring"""
if n < 0 or isinstance(a , a ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
SCREAMING_SNAKE_CASE : List[str] = self
for _ in range(n - 1 ):
x *= self
return x
def lowerCamelCase__ (func , position , order):
    """Return the *order*-th derivative of *func* at *position* using the dual
    number class above.

    Raises ValueError when *func* is not callable, *position* is not numeric,
    or *order* is not an int. The mangled original used one parameter name
    three times (SyntaxError) and referenced the undefined class name ``Dual``.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position , (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order , int):
        raise ValueError("differentiate() requires an int as input for order")
    # Seed with one infinitesimal term; func() propagates it.
    d = _UpperCamelCase(position , 1)
    result = func(d)
    if order == 0:
        return result.real
    # The k-th dual coefficient of the result is f^(k)(x) / k!.
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        """Example function: f(y) = y**6."""
        # The mangled version named the parameter `_a` but read the undefined
        # `y`; restore the parameter.
        return y**2 * y**4

    # `lowerCamelCase__` is the differentiate() helper defined just above;
    # the original shadowed it by also naming this local function
    # `lowerCamelCase__` and then called the undefined `differentiate`.
    print(lowerCamelCase__(f, 9, 2))
| 25
|
'''simple docstring'''
def UpperCamelCase__ ( separator , separated ):
    """Join the strings in *separated* with *separator*.

    Raises Exception when any item is not a string. The mangled original
    repeated one parameter name (SyntaxError) and type-checked
    ``isinstance(x, x)``; parameter names are restored from the body.
    """
    joined = """"""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("""join() accepts only strings to be joined""" )
        joined += word_or_phrase + separator
    # Strip the trailing separator added by the loop.
    return joined.strip(separator )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 207
| 0
|
from __future__ import annotations
def A ( pattern : str , text : str ) -> bool:
    """Knuth-Morris-Pratt search: return True iff *pattern* occurs in *text*.

    The mangled signature repeated ``lowercase__`` for both parameters
    (SyntaxError); names are restored from the body. NOTE(review): the helper
    is called as ``get_failure_array`` but the failure-array function below
    was also mangled to ``A`` — confirm the intended name.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def A ( pattern : str ) -> list[int]:
    """Compute the KMP failure array (longest proper prefix that is also a
    suffix, per position) of *pattern*.

    The mangled original named the parameter ``lowercase__`` but read the
    undefined ``pattern`` and appended ``lowercase__`` instead of ``i``.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Fall back to the next-shorter border and retry at the same j.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # NOTE(review): the test fixtures below were mangled — every assignment
    # reuses the name `UpperCamelCase`, so only the last binding survives, and
    # the asserts reference `pattern`/`texta`/`text`/`kmp`/`get_failure_array`,
    # none of which exist under those names (the two functions above are both
    # bound to `A`). As written this section raises NameError; confirm the
    # original variable names before running.
    # Test 1)
    UpperCamelCase = "abc1abc12"
    UpperCamelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    UpperCamelCase = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, texta)
    # Test 2)
    UpperCamelCase = "ABABX"
    UpperCamelCase = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    UpperCamelCase = "AAAB"
    UpperCamelCase = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    UpperCamelCase = "abcdabcy"
    UpperCamelCase = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    UpperCamelCase = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 383
|
from collections.abc import Callable
import numpy as np
def A ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    """Forward (explicit) Euler solver for y' = ode_func(x, y) with y(xa) = ya.

    Returns the array of y values at xa, xa+h, ..., up to x_end.

    NOTE(review): the mangled signature made all five parameters
    indistinguishable; this order follows the standard reference
    implementation (ode_func, y0, x0, step_size, x_end) — confirm against
    callers.
    """
    # Number of steps needed to reach x_end.
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 383
| 1
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

# Repository "src" directory, three levels above this test file; inserted into
# sys.path so the in-tree `transformers` package wins over any installed one.
# (The original inserted the undefined name `git_repo_path` — NameError.)
_UpperCAmelCase = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(_UpperCAmelCase))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# NOTE(review): the four constants below were presumably `models`, `ZERO2`,
# `ZERO3` and `stages` — every assignment reuses `_UpperCAmelCase`, so only the
# last binding survives, and `ZEROa` on the last line is undefined (NameError
# at import time). Confirm the original names before use.
_UpperCAmelCase = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_UpperCAmelCase = '''zero2'''
_UpperCAmelCase = '''zero3'''
_UpperCAmelCase = [ZEROa, ZEROa]
def _SCREAMING_SNAKE_CASE ( func , param_num , param ) -> Dict:
    """`parameterized` name_func: embed ALL of ``param.args`` in the sub-test
    name (signature: (test_func, param_num, param))."""
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    # (The original stringified the undefined `__lowercase` instead of each
    # arg, and lost the `param_based_name` binding.)
    param_based_name = parameterized.to_safe_name("""_""".join(str(x ) for x in param.args ) )
    return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
# NOTE(review): `stages` and `models` are not defined under those names above
# (the constants were mangled to `_UpperCAmelCase`), so this raises NameError
# as written — confirm the intended names.
_UpperCAmelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case_ ( __lowercase ):
    """End-to-end DeepSpeed tests for wav2vec2's run_asr.py example.

    NOTE(review): this class is heavily name-mangled — the `parameterized`
    decorator arguments and every test-method parameter reuse `_snake_case`
    (duplicate parameter names are a SyntaxError), and several locals read
    names (`models`, `model`, `output_dir`, `model_name`, `eval_steps`,
    `fpaa`, `distributed`, `args`, `launcher`, `script`, `ds_args`, `cmd`,
    `num_gpus`) that are never bound under those names. Reconstruct against
    the upstream test file before running.
    """
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def UpperCAmelCase__ ( self : List[str] , _snake_case : List[Any] , _snake_case : Dict )->int:
        '''Single-GPU fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def UpperCAmelCase__ ( self : Any , _snake_case : str , _snake_case : List[str] )->List[str]:
        '''Multi-GPU fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def UpperCAmelCase__ ( self : List[str] , _snake_case : Optional[Any] , _snake_case : Optional[int] )->Tuple:
        '''Single-GPU non-fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
    @require_torch_multi_gpu
    @parameterized.expand(_snake_case , name_func=_snake_case )
    def UpperCAmelCase__ ( self : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] )->List[Any]:
        '''Multi-GPU non-fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
    def UpperCAmelCase__ ( self : List[str] , _snake_case : Optional[Any] )->Tuple:
        '''Hook for extra output-dir checks; intentionally a no-op here.'''
        pass
    def UpperCAmelCase__ ( self : str , _snake_case : str , _snake_case : str , _snake_case : int = 10 , _snake_case : bool = True , _snake_case : bool = True , _snake_case : bool = True , )->Union[str, Any]:
        '''Resolve the model, run the trainer once, then sanity-check output.'''
        __lowerCAmelCase : Union[str, Any] = models[model]
        __lowerCAmelCase : Any = self.run_trainer(
            stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
        self.do_checks(_snake_case )
        return output_dir
    def UpperCAmelCase__ ( self : Any , _snake_case : str , _snake_case : str , _snake_case : int = 10 , _snake_case : int = 1 , _snake_case : bool = True , _snake_case : bool = True , )->Any:
        '''Launch run_asr.py under deepspeed with the given stage/model.'''
        __lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir("""./xxx""" , after=_snake_case )
        __lowerCAmelCase : Dict = F'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(_snake_case )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(["""--fp16"""] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __lowerCAmelCase : Optional[Any] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        __lowerCAmelCase : Tuple = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        __lowerCAmelCase : str = self.get_launcher(_snake_case )
        __lowerCAmelCase : str = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(_snake_case , env=self.get_env() )
        return output_dir
    def UpperCAmelCase__ ( self : List[str] , _snake_case : Any=False )->Tuple:
        '''Build the deepspeed launcher command (2 GPUs when distributed).'''
        __lowerCAmelCase : List[Any] = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 504
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

# Repository "src" directory, three levels above this test file; inserted into
# sys.path so the in-tree `transformers` package wins over any installed one.
# (The original inserted the undefined name `git_repo_path` — NameError.)
_lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(_lowerCamelCase))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# NOTE(review): the four constants below were presumably `models`, `ZERO2`,
# `ZERO3` and `stages` — every assignment reuses `_lowerCamelCase`, so only the
# last binding survives, and `ZEROa` on the last line is undefined (NameError
# at import time). Confirm the original names before use.
_lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
_lowerCamelCase : Optional[int] = '''zero2'''
_lowerCamelCase : List[Any] = '''zero3'''
_lowerCamelCase : Dict = [ZEROa, ZEROa]
def a_ ( func : Union[str, Any] , param_num : Union[str, Any] , param : Tuple ) -> Dict:
    """`parameterized` name_func: embed ALL of ``param.args`` in the sub-test
    name (signature: (test_func, param_num, param))."""
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    # (The original repeated `__lowercase` for all three parameters — a
    # SyntaxError — and stringified `__lowercase` instead of each arg.)
    param_based_name = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
# NOTE(review): `stages` and `models` are not defined under those names above
# (the constants were mangled to `_lowerCamelCase`), so this raises NameError
# as written — confirm the intended names.
_lowerCamelCase : Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """End-to-end DeepSpeed tests for wav2vec2's run_asr.py example.

    NOTE(review): heavily name-mangled — the `parameterized` decorator
    arguments and every test-method parameter reuse `lowercase` (duplicate
    parameter names are a SyntaxError), every local is bound to `_snake_case`,
    and the bodies read names (`models`, `model`, `output_dir`, `model_name`,
    `eval_steps`, `fpaa`, `distributed`, `args`, `launcher`, `script`,
    `ds_args`, `cmd`, `num_gpus`) that are never bound. Reconstruct against
    the upstream test file before running.
    """
    @parameterized.expand(lowercase , name_func=lowercase )
    def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ):
        '''Single-GPU fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
    @require_torch_multi_gpu
    @parameterized.expand(lowercase , name_func=lowercase )
    def A ( self : Any , lowercase : str , lowercase : List[str] ):
        '''Multi-GPU fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
    @parameterized.expand(lowercase , name_func=lowercase )
    def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ):
        '''Single-GPU non-fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
    @require_torch_multi_gpu
    @parameterized.expand(lowercase , name_func=lowercase )
    def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
        '''Multi-GPU non-fp16 run for every (stage, model) pair.'''
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
    def A ( self : List[str] , lowercase : Optional[Any] ):
        '''Hook for extra output-dir checks; intentionally a no-op here.'''
        pass
    def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ):
        '''Resolve the model, run the trainer once, then sanity-check output.'''
        _snake_case = models[model]
        _snake_case = self.run_trainer(
            stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
        self.do_checks(lowercase )
        return output_dir
    def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 , lowercase : bool = True , lowercase : bool = True , ):
        '''Launch run_asr.py under deepspeed with the given stage/model.'''
        _snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
        _snake_case = f'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(lowercase )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(['--fp16'] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        _snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        _snake_case = self.get_launcher(lowercase )
        _snake_case = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(lowercase , env=self.get_env() )
        return output_dir
    def A ( self : List[str] , lowercase : Any=False ):
        '''Build the deepspeed launcher command (2 GPUs when distributed).'''
        _snake_case = min(2 , get_gpu_count() ) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 686
| 0
|
'''Public feature types re-exported by this package.'''
# NOTE(review): `__A` looks like a mangled ``__all__`` — the list enumerates
# exactly the public feature classes re-exported below. Confirm before relying
# on star-import behaviour.
__A : Optional[Any] = [
    """Audio""",
    """Array2D""",
    """Array3D""",
    """Array4D""",
    """Array5D""",
    """ClassLabel""",
    """Features""",
    """Sequence""",
    """Value""",
    """Image""",
    """Translation""",
    """TranslationVariableLanguages""",
]
from .audio import Audio
# NOTE(review): the four `ArrayaD` names below were presumably Array2D..Array5D
# before digit-mangling; as written this imports the same name four times.
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 187
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
# NOTE(review): this second binding clobbers the logger above — the two
# constants were presumably distinct names (a module logger and the ViT
# pretrained-config archive map). Confirm the original names.
__A : int = {
    """google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase ( _lowerCamelCase ):
    """Configuration for a ViT model (mirrors ``transformers.ViTConfig``).

    Fixes vs. the mangled original: all fourteen ``__init__`` parameters
    shared one name (a SyntaxError) and every ``self.x = x`` assignment had
    lost its target. Parameter names/defaults are restored from the standard
    ViT configuration — confirm against upstream.
    """

    lowerCAmelCase__ = "vit"

    def __init__(
        self ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.0_2 ,
        layer_norm_eps=1E-12 ,
        image_size=224 ,
        patch_size=16 ,
        num_channels=3 ,
        qkv_bias=True ,
        encoder_stride=16 ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class lowercase ( _lowerCamelCase ):
    """ONNX export settings for ViT: minimum opset, input axes and the
    validation tolerance. (Both properties are named ``a__`` as in the
    original, so the second shadows the first on the class.)"""

    lowerCAmelCase__ = version.parse("1.11" )

    @property
    def a__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the single NCHW image input."""
        pixel_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", pixel_axes )] )

    @property
    def a__ ( self : Optional[int] ) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 187
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def lowerCAmelCase ( arr , allow_empty_subarrays = False ):
    '''Kadane's algorithm: maximum sum of a contiguous subarray of *arr*.

    When *allow_empty_subarrays* is true the empty subarray (sum 0) is a
    valid candidate. Returns 0 for an empty input. The mangled original
    repeated the parameter name (SyntaxError) and lost the `curr_sum`/
    `max_sum` bindings; they are restored from the body's references.
    '''
    if not arr:
        return 0
    # The empty subarray is only a candidate when explicitly allowed.
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at the current element.
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Demo: Kadane's algorithm on the classic example (answer: 6). The
    # original bound the list to a throwaway name and called the undefined
    # `max_subarray_sum`; the solver above is bound to `lowerCAmelCase`.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F"{lowerCAmelCase(nums) = }")
| 65
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _UpperCamelCase ( model_name, num_frames ) -> str:
    """Build an XCLIPConfig for *model_name* with *num_frames* frames.

    NOTE(review): the mangled original repeated the parameter name
    (SyntaxError) and lost every config-attribute assignment target; the
    attribute names below are restored from the upstream X-CLIP conversion
    script — confirm the ordering (768/3072/12 text, 1024/4096/16/24 vision,
    768/3072 MIT) against it.
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch" )
    patch_size = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )

    if "large" in model_name:
        config.projection_dim = 768

    return config
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
# text encoder
if name == "token_embedding.weight":
__UpperCAmelCase : Dict = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
__UpperCAmelCase : str = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
__UpperCAmelCase : Optional[int] = name.replace("ln_1", "layer_norm1" )
if "ln_2" in name:
__UpperCAmelCase : int = name.replace("ln_2", "layer_norm2" )
if "c_fc" in name:
__UpperCAmelCase : List[str] = name.replace("c_fc", "fc1" )
if "c_proj" in name:
__UpperCAmelCase : Optional[Any] = name.replace("c_proj", "fc2" )
if name.startswith("transformer.resblocks" ):
__UpperCAmelCase : Dict = name.replace("transformer.resblocks", "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
__UpperCAmelCase : Optional[int] = name.replace("attn.out_proj", "self_attn.out_proj" )
if "ln_final" in name:
__UpperCAmelCase : Optional[Any] = name.replace("ln_final", "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
__UpperCAmelCase : Dict = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
__UpperCAmelCase : List[Any] = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
__UpperCAmelCase : Optional[Any] = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers" )
if "visual.conv1" in name:
__UpperCAmelCase : Tuple = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
__UpperCAmelCase : List[Any] = name.replace("visual.ln_pre", "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
__UpperCAmelCase : Union[str, Any] = name.replace("visual.ln_post", "vision_model.post_layernorm" )
if "visual.proj" in name:
__UpperCAmelCase : int = name.replace("visual.proj", "visual_projection.weight" )
if "text_projection" in name:
__UpperCAmelCase : Optional[Any] = name.replace("text_projection", "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
__UpperCAmelCase : int = name.replace("prompts_visual_proj", "prompts_visual_projection" )
if "prompts_visual_ln" in name:
__UpperCAmelCase : List[str] = name.replace("prompts_visual_ln", "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
__UpperCAmelCase : List[Any] = name.replace("positional", "position" )
if name.startswith("mit.resblocks" ):
__UpperCAmelCase : List[Any] = name.replace("mit.resblocks", "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
__UpperCAmelCase : Any = name.replace("prompts_generator.norm", "prompts_generator.layernorm" )
return name
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
    """Rewrite a checkpoint state dict, splitting fused attention q/k/v
    projections into per-projection tensors.

    NOTE(review): heavily name-mangled — the two parameters share one name
    (a SyntaxError; presumably ``orig_state_dict`` and ``config``), and every
    ``__UpperCAmelCase = ...`` assignment below has lost its real target
    (presumably ``orig_state_dict[new_key] = ...`` with q/k/v key names), so
    the popped values are currently discarded. Reconstruct the targets from
    the upstream X-CLIP conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        # Pop the original entry; its value should be re-inserted under the
        # renamed key (or split into q/k/v slices below).
        __UpperCAmelCase : Tuple = orig_state_dict.pop(snake_case__ )
        if "attn.in_proj" in key:
            __UpperCAmelCase : Any = key.split("." )
            if key.startswith("visual" ):
                __UpperCAmelCase : Optional[Any] = key_split[3]
                __UpperCAmelCase : str = config.vision_config.hidden_size
                if "message_attn" in key:
                    # Fused weight rows: [q; k; v], each `dim` rows.
                    if "weight" in key:
                        __UpperCAmelCase : List[str] = val[
                            :dim, :
                        ]
                        __UpperCAmelCase : Union[str, Any] = val[
                            dim : dim * 2, :
                        ]
                        __UpperCAmelCase : Optional[Any] = val[
                            -dim:, :
                        ]
                    else:
                        __UpperCAmelCase : Dict = val[
                            :dim
                        ]
                        __UpperCAmelCase : Optional[int] = val[
                            dim : dim * 2
                        ]
                        __UpperCAmelCase : Tuple = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        __UpperCAmelCase : Optional[int] = val[
                            :dim, :
                        ]
                        __UpperCAmelCase : str = val[
                            dim : dim * 2, :
                        ]
                        __UpperCAmelCase : Optional[int] = val[
                            -dim:, :
                        ]
                    else:
                        __UpperCAmelCase : str = val[:dim]
                        __UpperCAmelCase : Dict = val[
                            dim : dim * 2
                        ]
                        __UpperCAmelCase : Any = val[-dim:]
            elif key.startswith("mit" ):
                __UpperCAmelCase : int = key_split[2]
                __UpperCAmelCase : Union[str, Any] = config.vision_config.mit_hidden_size
                if "weight" in key:
                    __UpperCAmelCase : int = val[:dim, :]
                    __UpperCAmelCase : Union[str, Any] = val[dim : dim * 2, :]
                    __UpperCAmelCase : List[str] = val[-dim:, :]
                else:
                    __UpperCAmelCase : int = val[:dim]
                    __UpperCAmelCase : Tuple = val[dim : dim * 2]
                    __UpperCAmelCase : Tuple = val[-dim:]
            else:
                __UpperCAmelCase : str = key_split[2]
                __UpperCAmelCase : List[Any] = config.text_config.hidden_size
                if "weight" in key:
                    __UpperCAmelCase : Dict = val[:dim, :]
                    __UpperCAmelCase : List[Any] = val[
                        dim : dim * 2, :
                    ]
                    __UpperCAmelCase : Dict = val[-dim:, :]
                else:
                    __UpperCAmelCase : int = val[:dim]
                    __UpperCAmelCase : Tuple = val[
                        dim : dim * 2
                    ]
                    __UpperCAmelCase : str = val[-dim:]
        else:
            # Non-fused key: rename, transposing the two projection matrices.
            __UpperCAmelCase : int = rename_key(snake_case__ )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                __UpperCAmelCase : List[str] = val.T
            __UpperCAmelCase : str = val
    return orig_state_dict
def _UpperCamelCase ( num_frames ) -> str:
    """Download the spaghetti-eating demo clip with *num_frames* frames and
    return it as a list of frame arrays.

    The mangled original named the parameter ``snake_case__`` but read the
    undefined ``num_frames``, and lost the ``filename``/``file`` bindings.
    Only 8/16/32 frames are supported; any other value raises NameError on
    the unbound ``filename`` (original behavior preserved).
    """
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file )
    return list(video )
def _UpperCamelCase ( model_name, pytorch_dump_folder_path=None, push_to_hub=False ) -> Dict:
    """Convert an original X-CLIP checkpoint to the HF format, sanity-check its
    predictions on a demo video, and optionally save/push the result.

    model_name: key into the checkpoint-URL table below.
    pytorch_dump_folder_path: directory to save the converted model, if given.
    push_to_hub: when True, upload model, processor and slow tokenizer.

    (The original signature declared the same parameter name three times — a
    SyntaxError — and the body referenced names that were never bound.)
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }
    checkpoint_url = model_to_url[model_name]
    # The frame count is encoded in the checkpoint variant name.
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()
    # Google-Drive-hosted checkpoints need gdown; the rest come via torch.hub.
    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    # position_ids buffers are generated on the fly, so they are expectedly missing.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)
    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True )
    print("Shape of pixel values:", inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs)
    # Verify outputs against values recorded from the original implementation.
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("Probs:", probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    assert torch.allclose(probs, expected_probs, atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub..." )
        model.push_to_hub(model_name, organization="nielsr" )
        processor.push_to_hub(model_name, organization="nielsr" )
        slow_tokenizer.push_to_hub(model_name, organization="nielsr" )
if __name__ == "__main__":
    # CLI entry point for the X-CLIP checkpoint conversion script.
    # NOTE(review): the ArgumentParser is bound to `_snake_case` but used as
    # `parser`, and `convert_xclip_checkpoint` is not defined under that name
    # above — both look like renaming artifacts; confirm before running.
    _snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''xclip-base-patch32''',
        type=str,
        help='''Name of the model.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _snake_case = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 382
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route all log records (DEBUG and up) to stdout so test runners capture them.
# Fix: the last line referenced `logger` and `stream_handler`, which were never
# bound (both assignments targeted a throwaway name).
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowercase ( _lowercase ):
    """End-to-end tests that finetune a dummy RAG model in a subprocess and
    check the resulting exact-match metric.

    Fix: all five methods shared one obfuscated name (each shadowing the
    previous), while the bodies called ``self._create_dummy_data`` /
    ``self._run_finetune`` with keyword arguments — restored the names the
    call sites (and unittest discovery) actually require.
    """

    def _create_dummy_data(self, data_dir):
        """Write tiny source/target files for train/val/test splits into *data_dir*."""
        os.makedirs(data_dir , exist_ok=True )
        contents = {'''source''': '''What is love ?''', '''target''': '''life'''}
        n_lines = {'''train''': 1_2, '''val''': 2, '''test''': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '''\n'''.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , F"""{split}.{field}""" ) , '''w''' ) as f:
                    f.write(content )

    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        """Run finetune_rag.py in a subprocess on dummy data; return parsed metrics.json."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , '''output''' )
        data_dir = os.path.join(tmp_dir , '''data''' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = F"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()
        if gpus > 0:
            testargs.append(F"""--gpus={gpus}""" )
            if is_apex_available():
                testargs.append('''--fp16''' )
        else:
            testargs.append('''--gpus=0''' )
            testargs.append('''--distributed_backend=ddp_cpu''' )
            testargs.append('''--num_processes=2''' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , '''metrics.json''' )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_gpu
    @require_ray
    def test_finetune_ray_retrieval(self):
        result = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 357
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( key , default=False ) -> int:
    """Read a boolean flag from environment variable *key*.

    Returns ``default`` when the variable is unset; otherwise maps common
    yes/no spellings to 1/0 and raises ValueError for anything else.

    Fixes: the original declared the same parameter name twice (SyntaxError),
    referenced unbound names, and passed the key instead of the value to the
    converter. distutils.util.strtobool (removed in Python 3.12) is inlined.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False (strtobool semantics).
        normalized = value.strip().lower()
        if normalized in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif normalized in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
__lowercase : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowercase ) -> Union[str, Any]:
    """Decorator that marks the wrapped test as always skipped."""
    decorator = unittest.skip('''Test was skipped''' )
    return decorator(_lowercase )
def lowercase_ ( _lowercase ) -> Optional[Any]:
    """Decorator skipping the test unless the module-level slow-tests flag is set."""
    decorator = unittest.skipUnless(_run_slow_tests , '''test is slow''' )
    return decorator(_lowercase )
def lowercase_ ( _lowercase ) -> List[str]:
    """Decorator skipping the test whenever a CUDA device is present."""
    cpu_only = not torch.cuda.is_available()
    return unittest.skipUnless(cpu_only , '''test requires only a CPU''' )(_lowercase )
def lowercase_ ( _lowercase ) -> str:
    """Decorator skipping the test unless a CUDA GPU is available."""
    has_cuda = torch.cuda.is_available()
    return unittest.skipUnless(has_cuda , '''test requires a GPU''' )(_lowercase )
def lowercase_ ( _lowercase ) -> str:
    """Decorator skipping the test unless an XPU device is available."""
    condition = is_xpu_available()
    return unittest.skipUnless(condition , '''test requires a XPU''' )(_lowercase )
def lowercase_ ( _lowercase ) -> List[str]:
    """Decorator skipping the test unless torch has an `mps` backend."""
    condition = is_mps_available()
    return unittest.skipUnless(condition , '''test requires a `mps` backend support in `torch`''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Tuple:
    """Decorator requiring both transformers and datasets to be installed."""
    suite_installed = is_transformers_available() and is_datasets_available()
    return unittest.skipUnless(suite_installed , '''test requires the Hugging Face suite''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Tuple:
    """Decorator skipping the test unless bitsandbytes is installed."""
    condition = is_bnb_available()
    return unittest.skipUnless(condition , '''test requires the bitsandbytes library''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Union[str, Any]:
    """Decorator skipping the test unless a TPU is available."""
    condition = is_tpu_available()
    return unittest.skipUnless(condition , '''test requires TPU''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Union[str, Any]:
    """Decorator skipping the test unless exactly one CUDA GPU is visible."""
    single_gpu = torch.cuda.device_count() == 1
    return unittest.skipUnless(single_gpu , '''test requires a GPU''' )(_lowercase )
def lowercase_ ( _lowercase ) -> List[str]:
    """Decorator skipping the test unless exactly one XPU is visible."""
    single_xpu = torch.xpu.device_count() == 1
    return unittest.skipUnless(single_xpu , '''test requires a XPU''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Tuple:
    """Decorator skipping the test unless more than one CUDA GPU is visible."""
    multi_gpu = torch.cuda.device_count() > 1
    return unittest.skipUnless(multi_gpu , '''test requires multiple GPUs''' )(_lowercase )
def lowercase_ ( _lowercase ) -> str:
    """Decorator skipping the test unless more than one XPU is visible."""
    multi_xpu = torch.xpu.device_count() > 1
    return unittest.skipUnless(multi_xpu , '''test requires multiple XPUs''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Tuple:
    """Decorator skipping the test unless safetensors is installed."""
    condition = is_safetensors_available()
    return unittest.skipUnless(condition , '''test requires safetensors''' )(_lowercase )
def lowercase_ ( _lowercase ) -> int:
    """Decorator skipping the test unless DeepSpeed is installed."""
    condition = is_deepspeed_available()
    return unittest.skipUnless(condition , '''test requires DeepSpeed''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Union[str, Any]:
    """Decorator requiring torch >= 1.12.0."""
    recent_enough = is_torch_version('''>=''' , '''1.12.0''' )
    return unittest.skipUnless(recent_enough , '''test requires torch version >= 1.12.0''' )(_lowercase )
def lowercase_ ( test_case=None , version=None ) -> Any:
    """Decorator (or decorator factory) requiring a minimum torch *version*.

    Used either directly on a test or as ``@require(version="x.y")``, in which
    case it returns a partial awaiting the test function.

    Fix: the original declared the same default parameter name twice — a
    SyntaxError — and the body referenced the unbound names restored here.
    """
    if test_case is None:
        return partial(lowercase_ , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , F"""test requires torch version >= {version}""" )(test_case )
def lowercase_ ( _lowercase ) -> str:
    """Decorator skipping the test unless TensorBoard is installed."""
    condition = is_tensorboard_available()
    return unittest.skipUnless(condition , '''test requires Tensorboard''' )(_lowercase )
def lowercase_ ( _lowercase ) -> Union[str, Any]:
    """Decorator skipping the test unless wandb is installed."""
    condition = is_wandb_available()
    return unittest.skipUnless(condition , '''test requires wandb''' )(_lowercase )
def lowercase_ ( _lowercase ) -> int:
    """Decorator skipping the test unless comet_ml is installed."""
    condition = is_comet_ml_available()
    return unittest.skipUnless(condition , '''test requires comet_ml''' )(_lowercase )
# True when a supported tracker (wandb / tensorboard) is installed and
# comet_ml is absent. The `require_trackers` decorator below references
# `_atleast_one_tracker_available`, which the original never bound.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
__lowercase : str = _atleast_one_tracker_available
def lowercase_ ( _lowercase ) -> List[Any]:
    """Decorator requiring at least one tracker and no comet_ml installation."""
    reason = '''test requires at least one tracker to be available and for `comet_ml` to not be installed'''
    return unittest.skipUnless(_atleast_one_tracker_available , reason , )(_lowercase )
class __lowercase ( unittest.TestCase ):
    """TestCase sharing one temporary directory across the whole class.

    Fix: setUpClass / tearDownClass / setUp had all been collapsed to one
    obfuscated name (so only the last survived, `cls.tmpdir` was never
    created), and the ``clear_on_setup`` attribute read by setUp was unbound.
    """

    # When True (default), the shared tmpdir is emptied before every test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the shared temporary directory once for the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the shared temporary directory after the last test."""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def setUp(self):
        """Delete everything inside the shared tmpdir so each test starts clean."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class __lowercase ( unittest.TestCase ):
    """TestCase that resets the accelerate state singletons after each test so
    state does not leak between tests.

    Fix: the method was named with an obfuscated identifier, so unittest never
    invoked it; restored the ``tearDown`` hook name its body clearly implements.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class __lowercase ( unittest.TestCase ):
    """TestCase helper that starts `unittest.mock` patchers and guarantees they
    are stopped when the test finishes."""

    def UpperCAmelCase__ (self , A ):
        """Start every patcher in *A* (a single mock or tuple/list of mocks)
        and schedule its stop via addCleanup.

        Fix: the original assigned the normalized list to a throwaway local
        and then iterated the never-set ``self.mocks`` (and referenced an
        unbound ``mocks`` name).
        """
        self.mocks = A if isinstance(A , (tuple, list) ) else [A]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def lowercase_ ( _lowercase ) -> Optional[int]:
    """Return True when *_lowercase* (a tensor) holds identical values on every
    distributed process, by gathering one copy per process and comparing rows.

    Fix: the original referenced unbound names (`tensor`) left over from the
    upstream implementation.
    """
    state = AcceleratorState()
    # Add a leading batch dim so the per-process tensors concatenate cleanly.
    local = _lowercase[None].clone().to(state.device )
    gathered = gather(local ).cpu()
    reference = local[0].cpu()
    for i in range(gathered.shape[0] ):
        if not torch.equal(gathered[i] , reference ):
            return False
    return True
class __lowercase :
    """Plain record of a finished subprocess: return code plus the captured
    stdout/stderr line lists.

    Fix: the original __init__ declared the same parameter name three times
    (SyntaxError) and bound the values to throwaway locals instead of
    instance attributes.
    """

    def __init__(self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowercase_ ( stream , callback ) -> Tuple:
    """Forward every line read from *stream* to *callback* until EOF
    (an empty read) is reached.

    Fix: the original declared the same parameter name twice (SyntaxError)
    and referenced the unbound names restored here.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def lowercase_ ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """Spawn *cmd* as an async subprocess, stream its stdout/stderr line by
    line (optionally echoing them), and return a _RunOutput record.

    Fix: the original declared the same parameter name six times (SyntaxError)
    and its body referenced unbound names; restored the upstream identifiers.
    """
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Decode, record, and optionally echo each captured line.
        decoded = line.decode('''utf-8''' ).rstrip()
        sink.append(decoded )
        if not quiet:
            print(label , decoded , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def lowercase_ ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    """Synchronously run *cmd* through the async streaming helper and raise a
    RuntimeError (with combined stderr) on a non-zero exit code.

    Fix: the original declared the same parameter name six times (SyntaxError)
    and referenced unbound names; restored the upstream identifiers.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class __lowercase ( _lowercase ):
    # Raised by the command-running helper below when a subprocess exits with
    # a non-zero status; upstream this is named SubprocessCallException.
    pass
def lowercase_ ( command , return_stdout=False ) -> Dict:
    """Run *command* (argv list), optionally returning its decoded stdout.

    Raises SubprocessCallException (wrapping CalledProcessError) when the
    command exits non-zero, embedding the captured output in the message.

    Fix: the original declared the same default parameter name twice
    (SyntaxError) and referenced unbound names (`return_stdout`, `output`).
    """
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 357
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: module name -> public symbols, extended per backend.
_lowerCamelCase = {
    'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
    'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}

# BUG fix: the original rebound the whole dict to a bare list in each `else`
# branch (discarding the entries above) and finally passed an undefined
# `_import_structure` to _LazyModule without installing it in sys.modules.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module defers them.
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
    import sys

    # Replace this module with the lazy proxy so attributes import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _lowerCamelCase)
| 6
|
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( old_name ,num_meta4D_last_stage ) -> str:
    """Translate one EfficientFormer checkpoint key to its HF-model name.

    old_name: original state-dict key.
    num_meta4D_last_stage: number of meta-4D blocks in the final stage, used to
        split the last stage's indices between meta4D and meta3D blocks.

    Fix: the original declared the same parameter name twice (SyntaxError) and
    its `re.search` calls passed the same obfuscated name for both pattern and
    subject; restored the upstream identifiers.
    """
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split("." )
        if layer == "0":
            new_name = old_name.replace("0" ,"convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1" ,"batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3" ,"convolution2" )
        else:
            new_name = old_name.replace("4" ,"batchnorm_after" )
    if "network" in old_name and re.search(r"\d\.\d" ,old_name ):
        # Stage/block indices may be one or two digits; pick the right pattern.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num ,old_name ) ):
            match = re.search(r"\d\.\d\d." ,old_name ).group()
        else:
            match = re.search(r"\d\.\d." ,old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match ,"" )
            trimmed_name = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match ,"" )
            # Last stage: early blocks are meta4D, the rest are meta3D (re-indexed).
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1" ,"layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2" ,"layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1" ,"linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2" ,"linear_out" )
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d." ,old_name ):
        new_name = old_name.replace("network" ,"intermediate_stages" )
    if "fc" in new_name:
        new_name = new_name.replace("fc" ,"convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1" ,"batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2" ,"batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj" ,"projection" )
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head" ,"distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head" ,"classifier" )
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm" ,"layernorm" )
        new_name = "efficientformer." + new_name
    else:
        # Everything else lives under the encoder of the base model.
        new_name = "efficientformer.encoder." + new_name
    return new_name
def _A ( checkpoint ,num_meta4D_last_stage ) -> Optional[Any]:
    """Rewrite every key of *checkpoint* (a state dict) to its HF name, in place.

    Fix: the original declared the same parameter name twice (SyntaxError) and
    lost the re-insertion of the popped value under the renamed key.
    NOTE(review): `rename_key` is the upstream name of the key-translation
    helper defined above (obfuscated here) — confirm the binding.
    """
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key ,num_meta4D_last_stage )] = val
    return checkpoint
def _A ( ):
    """Download and return the standard COCO cats demo image used for
    conversion sanity checks.

    Fix: the original passed an unbound name for both the URL and the
    `stream` flag of requests.get.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw )
    return image
def _A ( checkpoint_path ,efficientformer_config_file ,pytorch_dump_path ,push_to_hub ):
    """Convert an original EfficientFormer checkpoint to the HF format, verify
    the logits on a demo image, then save and optionally push the result.

    Parameter names match the keyword arguments used by the CLI entry point
    below (the original declared one name four times — a SyntaxError).
    """
    orig_state_dict = torch.load(checkpoint_path ,map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_metaad_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict ,num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,)
    pixel_values = processor(images=image ,return_tensors="pt" ).pixel_values
    # original processing pipeline, used to cross-check the processor output
    image_transforms = Compose(
        [
            Resize(image_size ,interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN ,IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values ,pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1_0_0_0)
    # Reference logits recorded from the original implementation per variant.
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :1_0] ,expected_logits ,atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :1_0] ,expected_logits ,atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(pytorch_dump_path )
    print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' ,commit_message="Add model" ,use_temp_dir=True ,)
        processor.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' ,commit_message="Add image processor" ,use_temp_dir=True ,)
if __name__ == "__main__":
    # CLI entry point for the EfficientFormer checkpoint conversion script.
    # NOTE(review): the parser is bound to `lowerCAmelCase` but used as
    # `parser`, and the parsed namespace is used as `args` — both look like
    # renaming artifacts; confirm before running.
    lowerCAmelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Default to pushing unless --no-push_to_hub is passed explicitly.
    parser.set_defaults(push_to_hub=True)
    lowerCAmelCase : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 372
| 0
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    # Dummy config registered under model type "new-model" for the
    # auto-class registration tests below.
    # NOTE(review): the base name `_UpperCAmelCase` was machine-mangled and is
    # undefined in this file (presumably `BertConfig`) -- TODO confirm against
    # the original test module.
    A_ : List[str] = 'new-model'
if is_tf_available():
    class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
        # TF model stub tied to the "new-model" config above; only defined
        # when TensorFlow is importable.
        # NOTE(review): `NewModelConfig` and the mangled base `_UpperCAmelCase`
        # are undefined here (originally a `TFBertModel` subclass with
        # `config_class = NewModelConfig`) -- TODO confirm.
        A_ : Optional[Any] = NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Integration tests for the TF auto-model factory classes.

    NOTE(review): identifiers in this block were machine-mangled.  Every test
    method is named ``a`` (later defs shadow earlier ones, so unittest can only
    collect the last one) and ``a__`` is referenced but never bound -- it
    stands in for the original locals (``model_name``, ``config``, ``model``,
    ...).  TODO: restore the original names before running.
    """

    @slow
    def a (self : Union[str, Any] ):
        """simple docstring"""
        # Config + base model round-trip through the auto classes.
        __snake_case = '''bert-base-cased'''
        __snake_case = AutoConfig.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )
        __snake_case = TFAutoModel.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : Union[str, Any] ):
        """simple docstring"""
        # Pre-training head variant.
        __snake_case = '''bert-base-cased'''
        __snake_case = AutoConfig.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )
        __snake_case = TFAutoModelForPreTraining.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : int ):
        """simple docstring"""
        # Causal-LM head, with and without loading info.
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForCausalLM.from_pretrained(a__ )
            __snake_case , __snake_case = TFAutoModelForCausalLM.from_pretrained(a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : int ):
        """simple docstring"""
        # Generic with-LM-head wrapper.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : str ):
        """simple docstring"""
        # Masked-LM head, with and without loading info.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ )
            __snake_case , __snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : Any ):
        """simple docstring"""
        # Seq2seq LM head.
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(a__ )
            __snake_case , __snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : Union[str, Any] ):
        """simple docstring"""
        # Sequence-classification head.
        for model_name in ["bert-base-uncased"]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForSequenceClassification.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    @slow
    def a (self : str ):
        """simple docstring"""
        # Question-answering head.
        for model_name in ["bert-base-uncased"]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForQuestionAnswering.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    @slow
    @require_tensorflow_probability
    def a (self : List[str] ):
        """simple docstring"""
        # Table QA (TAPAS) head; requires tensorflow-probability.
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            __snake_case = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(a__ )
            __snake_case , __snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(
                a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    def a (self : Optional[int] ):
        """simple docstring"""
        # Parameter counting on a tiny checkpoint.
        __snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
        self.assertIsInstance(a__ , a__ )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )

    def a (self : List[Any] ):
        """simple docstring"""
        # Same parameter-count check, duplicated in the original file.
        __snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
        self.assertIsInstance(a__ , a__ )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )

    def a (self : List[str] ):
        """simple docstring"""
        # A Funnel checkpoint maps onto a different class depending on the
        # config's architectures field; verify from_config and re-load.
        __snake_case = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(a__ , a__ )
        __snake_case = copy.deepcopy(model.config )
        __snake_case = ['''FunnelBaseModel''']
        __snake_case = TFAutoModel.from_config(a__ )
        self.assertIsInstance(a__ , a__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(a__ )
            __snake_case = TFAutoModel.from_pretrained(a__ )
            self.assertIsInstance(a__ , a__ )

    def a (self : str ):
        """simple docstring"""
        # Register a new model type in every auto class, then clean up the
        # global mappings in the finally block.
        try:
            AutoConfig.register('''new-model''' , a__ )
            __snake_case = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(a__ ):
                        auto_class.register(a__ , a__ )
                    auto_class.register(a__ , a__ )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(a__ ):
                        auto_class.register(a__ , a__ )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    __snake_case = BertModelTester(self ).get_config()
                    __snake_case = NewModelConfig(**tiny_config.to_dict() )
                    __snake_case = auto_class.from_config(a__ )
                    self.assertIsInstance(a__ , a__ )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(a__ )
                        __snake_case = auto_class.from_pretrained(a__ )
                        self.assertIsInstance(a__ , a__ )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def a (self : Tuple ):
        """simple docstring"""
        # Helpful error for an invalid repo id.
        with self.assertRaisesRegex(
            a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
            __snake_case = TFAutoModel.from_pretrained('''bert-base''' )

    def a (self : Dict ):
        """simple docstring"""
        # Helpful error for an invalid revision.
        with self.assertRaisesRegex(
            a__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            __snake_case = TFAutoModel.from_pretrained(a__ , revision='''aaaaaa''' )

    def a (self : Tuple ):
        """simple docstring"""
        # Helpful error when the repo has a config but no weights file.
        with self.assertRaisesRegex(
            a__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            __snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def a (self : int ):
        """simple docstring"""
        # PyTorch-only checkpoints should point the user at from_pt=True.
        with self.assertRaisesRegex(a__ , '''Use `from_pt=True` to load this model''' ):
            __snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def a (self : List[str] ):
        """simple docstring"""
        # A second (cached) load should issue one HEAD request and no GETs.
        __snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            __snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        __snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            __snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 388
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Input/output type names a Tool may declare; used by the checks below.
snake_case_ = ['text', 'image', 'audio']
def lowerCamelCase__ ( snake_case_ : List[str] ) -> Dict:
__snake_case = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(snake_case_ , snake_case_ ):
inputs.append(create_inputs(snake_case_ ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowerCamelCase__ ( snake_case_ : List ) -> list:
    """Map each produced output to its agent type name.

    Args:
        snake_case_: Outputs returned by a tool call.

    Returns:
        A list of ``"text"`` / ``"image"`` / ``"audio"`` strings, one per
        output.

    Raises:
        ValueError: If an output matches none of the known agent types.
    """
    # Bug fixes vs the mangled original: the loop iterated an undefined
    # ``outputs`` and appended to an undefined ``output_types``.
    output_types = []
    for output in snake_case_:
        if isinstance(output , (str, AgentText) ):
            output_types.append('''text''' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('''image''' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append('''audio''' )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class SCREAMING_SNAKE_CASE__ :
    """Mixin exercising the common contract of a `self.tool` instance.

    NOTE(review): this block is machine-mangled -- every ``__snake_case = ...``
    assignment originally bound a named local (``inputs``, ``outputs``,
    ``_inputs``, ...) that later lines still read, and ``a__`` /
    ``authorized_types`` are referenced but never defined here
    (``authorized_types`` is presumably the module-level type list).  TODO
    restore original names before running.
    """

    def a (self : List[str] ):
        """simple docstring"""
        # Declared inputs/outputs must come from the authorized type names.
        self.assertTrue(hasattr(self.tool , '''inputs''' ) )
        self.assertTrue(hasattr(self.tool , '''outputs''' ) )
        __snake_case = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , a__ ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        __snake_case = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def a (self : Union[str, Any] ):
        """simple docstring"""
        # Calling the tool with dummy inputs yields the declared output types.
        __snake_case = create_inputs(self.tool.inputs )
        __snake_case = self.tool(*a__ )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            __snake_case = [outputs]
        self.assertListEqual(output_types(a__ ) , self.tool.outputs )

    def a (self : Tuple ):
        """simple docstring"""
        # Description metadata must exist and follow the expected phrasing.
        self.assertTrue(hasattr(self.tool , '''description''' ) )
        self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
        self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )

    def a (self : Dict ):
        """simple docstring"""
        # Outputs must be instances of the mapped agent types.
        __snake_case = create_inputs(self.tool.inputs )
        __snake_case = self.tool(*a__ )
        if not isinstance(a__ , a__ ):
            __snake_case = [outputs]
        self.assertEqual(len(a__ ) , len(self.tool.outputs ) )
        for output, output_type in zip(a__ , self.tool.outputs ):
            __snake_case = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a__ , a__ ) )

    def a (self : int ):
        """simple docstring"""
        # The tool must also accept agent-typed (wrapped) inputs.
        __snake_case = create_inputs(self.tool.inputs )
        __snake_case = []
        for _input, input_type in zip(a__ , self.tool.inputs ):
            if isinstance(a__ , a__ ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        __snake_case = self.tool(*a__ )
        if not isinstance(a__ , a__ ):
            __snake_case = [outputs]
        self.assertEqual(len(a__ ) , len(self.tool.outputs ) )
| 388
| 1
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
# Module-level logger (mangled name; originally `logger`).
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case ):
if isinstance(__snake_case , np.ndarray ):
return list(tensor.shape )
_UpperCamelCase = tf.shape(__snake_case )
if tensor.shape == tf.TensorShape(__snake_case ):
return dynamic
_UpperCamelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__snake_case )]
def _snake_case ( logits , axis=None , name=None ):
    """Numerically stabler softmax: adds a tiny epsilon to the logits.

    Works around XLA issues with all-masked rows by shifting logits by 1e-9
    before calling ``tf.nn.softmax``.

    Args:
        logits: Logits tensor.
        axis: Axis to normalize over (``tf.nn.softmax`` default when None).
        name: Optional op name.
    """
    # Fix vs the mangled original: the signature declared the same parameter
    # name three times (a SyntaxError) while the body read `logits`.
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def _snake_case ( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
    """Functional layer norm mirroring ``torch.nn.functional.layer_norm``.

    Only 1-D ``weight``/``bias`` and a single integer ``axis`` are supported.

    Args:
        inputs: Tensor to normalize.
        weight: 1-D scale tensor.
        bias: 1-D offset tensor.
        epsilon: Variance epsilon.
        axis: Axis to normalize over.

    Raises:
        NotImplementedError: For multi-dim weight/bias or non-int axis.
    """
    # Fix vs the mangled original: duplicated parameter names (SyntaxError)
    # and undefined locals (`weight`, `inputs`, ...) restored.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        # NOTE(review): `shape_list` is the helper defined above in this file
        # (its name was also mangled to `_snake_case`) -- confirm the binding.
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def _snake_case ( input , start_dim=0 , end_dim=-1 ):
    """Replicate the behavior of ``torch.flatten`` for TF tensors.

    Args:
        input: Tensor to flatten.
        start_dim: First dim to flatten (negative counts from the end).
        end_dim: Last dim to flatten (negative counts from the end).
    """
    # Fix vs the mangled original: the signature declared the same parameter
    # three times (a SyntaxError) while the body read `input`/`start_dim`/
    # `end_dim`.
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def _snake_case ( encoder_attention_mask ):
    """Invert a 2-D/3-D attention mask into an additive extended mask.

    Returns ``(1 - mask) * dtype.min`` broadcastable over attention scores.
    NOTE(review): ranks other than 2 or 3 leave the result unbound (NameError),
    matching the upstream implementation's implicit contract.
    """
    # Fix vs the mangled original: undefined locals restored.
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def _snake_case ( tensor , embed_dim , tensor_name = "input_ids" ):
    """Assert all ids in `tensor` are below the embedding input dimension.

    Raises a TF debugging error with a tokenization hint otherwise.
    """
    # Fix vs the mangled original: duplicate parameter names (SyntaxError).
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ) , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_UpperCamelCase = [x for x in data if len(__snake_case ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
_UpperCamelCase = np.asarray(__snake_case )
_UpperCamelCase = 1
_UpperCamelCase = np.array_split(__snake_case , __snake_case )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_UpperCamelCase = np.array_split(__snake_case , __snake_case )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__snake_case ):
_UpperCamelCase = chunk_data
else:
_UpperCamelCase = data
def _snake_case ( __snake_case , __snake_case ):
if name in group.attrs:
_UpperCamelCase = [n.decode('''utf8''' ) if hasattr(__snake_case , '''decode''' ) else n for n in group.attrs[name]]
else:
_UpperCamelCase = []
_UpperCamelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(__snake_case , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _snake_case ( data ):
    """Expand every rank-1 tensor in a nested structure to rank 2.

    Mirrors Keras' internal 1-D expansion: each ``tf.Tensor`` of rank 1 gains
    a trailing axis; everything else is returned unchanged.
    """
    # Fix vs the mangled original: the inner function's parameter was mangled
    # while its body read `t`.
    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor , data )
| 10
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _lowerCAmelCase ( UpperCamelCase__ ):
def __init__( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=0 ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[int] =1.0 if scale is None else scale
SCREAMING_SNAKE_CASE : List[Any] =0.0 if loc is None else loc
super().__init__(snake_case_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=snake_case_ )] )
@property
def __a ( self ) -> Any:
return self.base_dist.mean * self.scale + self.loc
@property
def __a ( self ) -> str:
return self.base_dist.variance * self.scale**2
@property
def __a ( self ) -> Union[str, Any]:
return self.variance.sqrt()
class _lowerCAmelCase ( nn.Module ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> None:
super().__init__(**snake_case_ )
SCREAMING_SNAKE_CASE : List[Any] =args_dim
SCREAMING_SNAKE_CASE : Any =nn.ModuleList([nn.Linear(snake_case_ , snake_case_ ) for dim in args_dim.values()] )
SCREAMING_SNAKE_CASE : Dict =domain_map
def __a ( self , snake_case_ ) -> Tuple[torch.Tensor]:
SCREAMING_SNAKE_CASE : Dict =[proj(snake_case_ ) for proj in self.proj]
return self.domain_map(*snake_case_ )
class _lowerCAmelCase ( nn.Module ):
def __init__( self , snake_case_ ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple =function
def __a ( self , snake_case_ , *snake_case_ ) -> Dict:
return self.function(snake_case_ , *snake_case_ )
class _lowerCAmelCase :
    """Base class describing the output distribution of a time-series head.

    NOTE(review): this block is machine-mangled and does not run as written --
    the three ``lowerCamelCase__ = 42`` lines were originally annotated class
    attributes (distribution class, args_dim, ...), the ``__a`` methods all
    shadow one another, one ``__a`` signature repeats a parameter name
    (SyntaxError), constructor assignments target throwaway names instead of
    ``self.dim`` / ``self.args_dim``, and ``ParameterProjection`` /
    ``LambdaLayer`` / ``AffineTransformed`` refer to the classes above whose
    names were also mangled.  TODO restore original identifiers.
    """

    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42

    def __init__( self , snake_case_ = 1 ) -> None:
        # NOTE(review): should bind self.dim / self.args_dim; the mangled
        # targets never persist.
        SCREAMING_SNAKE_CASE : Dict =dim
        SCREAMING_SNAKE_CASE : Dict ={k: dim * self.args_dim[k] for k in self.args_dim}

    def __a ( self , snake_case_ ) -> Optional[Any]:
        # Wrap in Independent when the output has more than one dimension.
        if self.dim == 1:
            return self.distribution_class(*snake_case_ )
        else:
            return Independent(self.distribution_class(*snake_case_ ) , 1 )

    def __a ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Distribution:
        # Optionally shift/rescale the base distribution via AffineTransformed.
        SCREAMING_SNAKE_CASE : Optional[int] =self._base_distribution(snake_case_ )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(snake_case_ , loc=snake_case_ , scale=snake_case_ , event_dim=self.event_dim )

    @property
    def __a ( self ) -> Tuple:
        # Event shape: scalar () for dim == 1, otherwise (dim,).
        return () if self.dim == 1 else (self.dim,)

    @property
    def __a ( self ) -> int:
        return len(self.event_shape )

    @property
    def __a ( self ) -> float:
        # Lower bound of the distribution's support.
        return 0.0

    def __a ( self , snake_case_ ) -> nn.Module:
        # Build the projection from hidden features to raw parameters.
        return ParameterProjection(
            in_features=snake_case_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def __a ( self , *snake_case_ ) -> int:
        raise NotImplementedError()

    @staticmethod
    def __a ( snake_case_ ) -> torch.Tensor:
        # Squareplus: smooth positive map, an alternative to softplus.
        return (x + torch.sqrt(torch.square(snake_case_ ) + 4.0 )) / 2.0
class _lowerCAmelCase ( UpperCamelCase__ ):
    """Student-T output head.

    NOTE(review): machine-mangled -- the base name ``UpperCamelCase__`` is
    undefined (presumably the DistributionOutput base above), the domain-map
    classmethod repeats a parameter name (SyntaxError), and its assignments
    target throwaway names instead of ``df``/``loc``/``scale``.  TODO restore.
    """

    lowerCamelCase__ = {"df": 1, "loc": 1, "scale": 1}
    lowerCamelCase__ = StudentT

    @classmethod
    def __a ( cls , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
        # Map raw projections to valid (df, loc, scale): scale > 0, df > 2.
        SCREAMING_SNAKE_CASE : Dict =cls.squareplus(snake_case_ ).clamp_min(torch.finfo(scale.dtype ).eps )
        SCREAMING_SNAKE_CASE : int =2.0 + cls.squareplus(snake_case_ )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( UpperCamelCase__ ):
    """Normal (Gaussian) output head.

    NOTE(review): machine-mangled -- undefined base ``UpperCamelCase__``,
    duplicate parameter names in the domain map (SyntaxError), and mangled
    assignment targets.  TODO restore.
    """

    lowerCamelCase__ = {"loc": 1, "scale": 1}
    lowerCamelCase__ = Normal

    @classmethod
    def __a ( cls , snake_case_ , snake_case_ ) -> Optional[Any]:
        # Map raw projections to a valid (loc, scale) with scale > 0.
        SCREAMING_SNAKE_CASE : List[Any] =cls.squareplus(snake_case_ ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( UpperCamelCase__ ):
    """Negative-binomial output head (count data).

    NOTE(review): machine-mangled -- undefined base ``UpperCamelCase__``,
    duplicate parameter names in method signatures (SyntaxError), and tuple
    unpacking assigned to throwaway names while later lines read
    ``total_count``/``logits``.  TODO restore.
    """

    lowerCamelCase__ = {"total_count": 1, "logits": 1}
    lowerCamelCase__ = NegativeBinomial

    @classmethod
    def __a ( cls , snake_case_ , snake_case_ ) -> int:
        # total_count must be positive; logits are unconstrained.
        SCREAMING_SNAKE_CASE : Any =cls.squareplus(snake_case_ )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def __a ( self , snake_case_ ) -> Distribution:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] =distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=snake_case_ , logits=snake_case_ )
        else:
            return Independent(self.distribution_class(total_count=snake_case_ , logits=snake_case_ ) , 1 )

    def __a ( self , snake_case_ , snake_case_ = None , snake_case_ = None ) -> Distribution:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] =distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 258
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of labels for each GLUE task; used to size the classification head
# when converting a fine-tuned XLNet checkpoint.
UpperCAmelCase_ : Any = {
    '''cola''': 2,
    '''mnli''': 3,
    '''mrpc''': 2,
    '''sst-2''': 2,
    '''sts-b''': 1,
    '''qqp''': 2,
    '''qnli''': 2,
    '''rte''': 2,
    '''wnli''': 2,
}

logging.set_verbosity_info()
def _lowerCAmelCase(tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ) -> None:
    """Convert a TensorFlow XLNet checkpoint to a PyTorch checkpoint.

    Picks the model head (sequence classification, QA, or LM) from the
    ``finetuning_task`` name, loads the TF weights, and writes
    ``pytorch_model.bin`` plus ``config.json`` to the dump folder.

    Args:
        tf_checkpoint_path: Path to the TF checkpoint.
        xlnet_config_file: Path to the XLNet config JSON.
        pytorch_dump_folder_path: Output folder for the PyTorch files.
        finetuning_task: Optional GLUE/SQuAD task name.
    """
    # Fixes vs the mangled original: all four parameters were named `a`
    # (a SyntaxError) and locals (`config`, `model`, ...) were never bound.
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''''''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point for the XLNet TF -> PyTorch conversion.
    # NOTE(review): machine-mangled -- the parser is bound to `UpperCAmelCase_`
    # but read back as `parser`/`args`, and `convert_xlnet_checkpoint_to_pytorch`
    # is undefined here (the function above was renamed `_lowerCAmelCase`).
    # TODO restore consistent names before running.
    UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    UpperCAmelCase_ : Optional[int] = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 165
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
# Module-level logger (mangled name; originally `logger`).
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __UpperCAmelCase ( _lowerCamelCase ):
    '''simple docstring'''

    # Zero-shot image-classification pipeline: scores an image against a set
    # of candidate text labels with a CLIP-style model.
    # NOTE(review): this block is machine-mangled -- `_lowerCamelCase` stands
    # in for the original base class (`Pipeline`) and decorator argument,
    # `__call__` declares duplicate parameter names (SyntaxError), the four
    # `UpperCamelCase_` methods shadow one another (originally
    # `_sanitize_parameters` / `preprocess` / `_forward` / `postprocess`),
    # and every `_SCREAMING_SNAKE_CASE = ...` assignment originally bound a
    # named local (`preprocess_params`, `image`, ...) that later lines still
    # read.  TODO restore original names before running.

    def __init__( self , **_A ):
        '''simple docstring'''
        super().__init__(**_A )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self , _A , **_A ):
        '''simple docstring'''
        return super().__call__(_A , **_A )

    def UpperCamelCase_ ( self , **_A ):
        '''simple docstring'''
        # Split pipeline kwargs into (preprocess, forward, postprocess) params.
        _SCREAMING_SNAKE_CASE ={}
        if "candidate_labels" in kwargs:
            _SCREAMING_SNAKE_CASE =kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            _SCREAMING_SNAKE_CASE =kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}

    def UpperCamelCase_ ( self , _A , _A=None , _A="This is a photo of {}." ):
        '''simple docstring'''
        # Prepare the image tensor and one tokenized hypothesis per label.
        _SCREAMING_SNAKE_CASE =load_image(_A )
        _SCREAMING_SNAKE_CASE =self.image_processor(images=[image] , return_tensors=self.framework )
        _SCREAMING_SNAKE_CASE =candidate_labels
        _SCREAMING_SNAKE_CASE =[hypothesis_template.format(_A ) for x in candidate_labels]
        _SCREAMING_SNAKE_CASE =self.tokenizer(_A , return_tensors=self.framework , padding=_A )
        _SCREAMING_SNAKE_CASE =[text_inputs]
        return inputs

    def UpperCamelCase_ ( self , _A ):
        '''simple docstring'''
        # Forward pass producing per-image logits over the candidate labels.
        _SCREAMING_SNAKE_CASE =model_inputs.pop('''candidate_labels''' )
        _SCREAMING_SNAKE_CASE =model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , _A ):
            _SCREAMING_SNAKE_CASE =text_inputs[0]
        else:
            # Batching case.
            _SCREAMING_SNAKE_CASE =text_inputs[0][0]
        _SCREAMING_SNAKE_CASE =self.model(**_A , **_A )
        _SCREAMING_SNAKE_CASE ={
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs

    def UpperCamelCase_ ( self , _A ):
        '''simple docstring'''
        # Convert logits to a sorted list of {score, label} dicts.
        _SCREAMING_SNAKE_CASE =model_outputs.pop('''candidate_labels''' )
        _SCREAMING_SNAKE_CASE =model_outputs['''logits'''][0]
        if self.framework == "pt":
            _SCREAMING_SNAKE_CASE =logits.softmax(dim=-1 ).squeeze(-1 )
            _SCREAMING_SNAKE_CASE =probs.tolist()
            if not isinstance(_A , _A ):
                _SCREAMING_SNAKE_CASE =[scores]
        elif self.framework == "tf":
            _SCREAMING_SNAKE_CASE =stable_softmax(_A , axis=-1 )
            _SCREAMING_SNAKE_CASE =probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        # NOTE(review): the sort key reads `x[0]` but the lambda parameter is
        # `_A` -- in the unmangled original this was `lambda x: -x[0]`.
        _SCREAMING_SNAKE_CASE =[
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] )
        ]
        return result
| 165
| 1
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE (lowerCAmelCase__ , unittest.TestCase ):
_UpperCamelCase : Tuple = GPTaTokenizer
_UpperCamelCase : Optional[int] = GPTaTokenizerFast
_UpperCamelCase : List[str] = True
_UpperCamelCase : str = {"""add_prefix_space""": True}
_UpperCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowercase__ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
lowercase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowercase__ = {'unk_token': '<unk>'}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **a : int )-> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , **a : int )-> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int )-> Tuple:
"""simple docstring"""
lowercase__ = 'lower newer'
lowercase__ = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
lowercase__ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ = 'lower newer'
lowercase__ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase__ = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
lowercase__ = 'lower newer'
# Testing tokenization
lowercase__ = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
lowercase__ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
lowercase__ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
lowercase__ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
lowercase__ = tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
lowercase__ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing the unknown token
lowercase__ = tokens + [rust_tokenizer.unk_token]
lowercase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , *a : List[str] , **a : str )-> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Any , a : int=15 )-> Union[str, Any]:
    """GPT-2 defines no padding token, so any padding='max_length' request on the
    fast tokenizer must raise, for simple, batched and pair inputs alike."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
            lowercase__ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
            # Simple input
            lowercase__ = 'This is a simple input'
            lowercase__ = ['This is a simple input 1', 'This is a simple input 2']
            lowercase__ = ('This is a simple input', 'This is a pair')
            lowercase__ = [
                ('This is a simple input 1', 'This is a simple input 2'),
                ('This is a simple pair 1', 'This is a simple pair 2'),
            ]
            # Simple input tests
            self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='max_length' )
            # Simple input
            self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='max_length' )
            # Simple input
            self.assertRaises(
                lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='max_length' , )
            # Pair input
            self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='max_length' )
            # Pair input
            self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='max_length' )
            # Pair input
            self.assertRaises(
                lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='max_length' , )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Tuple:
    """With an explicit '<pad>' token, verify max_length padding and automatic
    longest-sequence padding for single strings, batches and text pairs."""
    lowercase__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
    # Simple input
    lowercase__ = 'This is a simple input'
    lowercase__ = ['This is a simple input looooooooong', 'This is a simple input']
    lowercase__ = ('This is a simple input', 'This is a pair')
    lowercase__ = [
        ('This is a simple input loooooong', 'This is a simple input'),
        ('This is a simple pair loooooong', 'This is a simple pair'),
    ]
    lowercase__ = tokenizer.pad_token_id
    lowercase__ = tokenizer(lowerCAmelCase_ , padding='max_length' , max_length=30 , return_tensors='np' )
    lowercase__ = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='np' )
    lowercase__ = tokenizer(*lowerCAmelCase_ , padding='max_length' , max_length=60 , return_tensors='np' )
    lowercase__ = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='np' )
    # s
    # test single string max_length padding
    self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
    self.assertTrue(pad_token_id in out_s['input_ids'] )
    self.assertTrue(0 in out_s['attention_mask'] )
    # s2
    # test automatic padding
    self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
    # long slice doesn't have padding
    self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
    self.assertFalse(0 in out_sa['attention_mask'][0] )
    # short slice does have padding
    self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
    self.assertTrue(0 in out_sa['attention_mask'][1] )
    # p
    # test single pair max_length padding
    self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
    self.assertTrue(pad_token_id in out_p['input_ids'] )
    self.assertTrue(0 in out_p['attention_mask'] )
    # p2
    # test automatic padding pair
    self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
    # long slice pair doesn't have padding
    self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
    self.assertFalse(0 in out_pa['attention_mask'][0] )
    # short slice pair does have padding
    self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
    self.assertTrue(0 in out_pa['attention_mask'][1] )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
    """A custom bos token ('$$$') must be prepended by encode (single and batch)
    and survive decode / batch_decode as the first whitespace-split token."""
    lowercase__ = '$$$'
    lowercase__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )
    lowercase__ = 'This is a simple input'
    lowercase__ = ['This is a simple input 1', 'This is a simple input 2']
    lowercase__ = tokenizer.bos_token_id
    lowercase__ = tokenizer(lowerCAmelCase_ )
    lowercase__ = tokenizer(lowerCAmelCase_ )
    self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ )
    self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
    lowercase__ = tokenizer.decode(out_s.input_ids )
    lowercase__ = tokenizer.batch_decode(out_sa.input_ids )
    self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ )
    self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
    """Intentional no-op: disables the corresponding base-class check for this suite."""
    pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
    """The special_tokens_mask returned by encode_plus must exactly mark the
    tokens added on top of the two raw (no-special-token) sequences."""
    lowercase__ = [self.get_tokenizer(do_lower_case=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )]
    for tokenizer in tokenizers:
        with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
            lowercase__ = 'Encode this.'
            lowercase__ = 'This one too please.'
            lowercase__ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
            encoded_sequence += tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
            lowercase__ = tokenizer.encode_plus(
                lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , )
            lowercase__ = encoded_sequence_dict['input_ids']
            lowercase__ = encoded_sequence_dict['special_tokens_mask']
            self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
            # Drop positions flagged as special, keep the rest for comparison.
            lowercase__ = [
                (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase_ )
            ]
            lowercase__ = [x for x in filtered_sequence if x is not None]
            self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_tokenizers
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Regression tests for the OPT (facebook/opt-350m) GPT-2-style tokenizer:
    slow→fast conversion, save/load round-trips and bos-token overrides."""
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
        """A slow→fast converted tokenizer must produce the same ids before and
        after a save_pretrained / from_pretrained round-trip."""
        lowercase__ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowerCAmelCase_ )
        lowercase__ = 'A photo of a cat'
        lowercase__ = tokenizer.encode(
            lowerCAmelCase_ , )
        self.assertEqual(lowerCAmelCase_ , [2, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained('test_opt' )
        lowercase__ = AutoTokenizer.from_pretrained('./test_opt' )
        lowercase__ = tokenizer.encode(
            lowerCAmelCase_ , )
        self.assertEqual(lowerCAmelCase_ , [2, 250, 1_345, 9, 10, 4_758] )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
        """The slow tokenizer path must yield the same ids as the fast path above."""
        lowercase__ = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=lowerCAmelCase_ )
        lowercase__ = 'A photo of a cat'
        lowercase__ = tokenizer.encode(
            lowerCAmelCase_ , )
        # Same as above
        self.assertEqual(lowerCAmelCase_ , [2, 250, 1_345, 9, 10, 4_758] )
    @unittest.skip('This test is failing because of a bug in the fast tokenizer' )
    def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
        """Overriding bos_token with an in-vocab word must change the leading id
        and survive a save/load round-trip (skipped: known fast-tokenizer bug)."""
        lowercase__ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowerCAmelCase_ )
        lowercase__ = 'bos'
        lowercase__ = tokenizer.get_vocab()['bos']
        lowercase__ = 'A photo of a cat'
        lowercase__ = tokenizer.encode(
            lowerCAmelCase_ , )
        # We changed the bos token
        self.assertEqual(lowerCAmelCase_ , [31_957, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained('./tok' )
        lowercase__ = AutoTokenizer.from_pretrained('./tok' )
        self.assertTrue(tokenizer.is_fast )
        lowercase__ = tokenizer.encode(
            lowerCAmelCase_ , )
        self.assertEqual(lowerCAmelCase_ , [31_957, 250, 1_345, 9, 10, 4_758] )
| 235
|
import math
def UpperCAmelCase ( number: int ) -> int:
    """Return the ``number``-th Proth number (OEIS A080075: 3, 5, 9, 13, 17, 25, ...).

    Proth numbers are integers of the form k * 2**m + 1 with odd k < 2**m.

    :param number: 1-based index of the Proth number to return.
    :raises TypeError: if ``number`` is not an int.
    :raises ValueError: if ``number`` < 1.

    BUG FIX: the original collapsed every local into one obfuscated name, so
    `proth_list`, `proth_index` and `increment` were read while unbound and
    `isinstance` compared the argument against itself.
    """
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    if number == 1:
        return 3
    if number == 2:
        return 5
    # Proth numbers sit in blocks between consecutive powers of two; the
    # highest block needed for index `number` is log2(number // 3) + 2.
    block_index = int(math.log(number // 3, 2)) + 2
    proth_list = [3, 5]
    proth_index = 2
    increment = 3  # number of entries generated by the first block
    for block in range(1, block_index):
        for _ in range(increment):
            proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
            proth_index += 1
        increment *= 2  # each block contributes twice as many entries
    return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Print the first Proth numbers; index 0 is invalid and exercises the
    # ValueError branch of the function above.
    for number in range(1_1):
        A_ = 0
        try:
            # BUG FIX: the function defined above is `UpperCAmelCase`; the
            # original called an undefined name `proth` and printed an
            # unbound `value`.
            A_ = UpperCAmelCase(number)
        except ValueError:
            print(F'ValueError: there is no {number}th Proth number')
            continue
        print(F'The {number}th Proth number: {A_}')
| 393
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers (T_n = n(n+1)/2 for
# n = 1..100), used below for membership tests against word values.
UpperCAmelCase_ : Union[str, Any] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A_ ():
    """Project Euler 42: count 'triangle words' in words.txt (placed next to
    this script) — words whose letter values (A=1 .. Z=26) sum to a
    triangular number.

    :returns: the number of triangle words found.

    BUG FIX: the original passed an unbound obfuscated name to
    ``os.path.realpath`` (instead of ``__file__``) and summed
    ``ord(<unbound>)`` instead of ``ord(x)`` per character.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    # The input file is a single comma-separated line of quoted words.
    with open(words_file_path) as f:
        words_line = f.readline()
    words = [word.strip("\"") for word in words_line.strip("\r\n").split(",")]
    triangle_words = [
        value
        for value in [sum(ord(x) - 64 for x in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_words)
if __name__ == "__main__":
    # BUG FIX: the solver above is named `A_`; the original called an
    # undefined name `solution`.
    print(A_())
| 721
|
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( flax_key_tuple , flax_tensor ):
    """Rename a flax parameter key to its PyTorch name and reorient the tensor.

    :param flax_key_tuple: tuple of key components; last component identifies
        the parameter kind ("kernel", "scale", "embedding", ...).
    :param flax_tensor: the parameter as a torch tensor.
    :returns: (renamed key tuple, possibly transposed tensor).

    BUG FIX: the original gave both parameters the same obfuscated name and
    returned unbound identifiers; names are restored here.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (num_experts, in, out) -> (num_experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: flax stores (in, out); torch expects (out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def A_ ( layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened t5x checkpoint key into its addressable parts.

    :param layer: flattened key, e.g. ``"encoder/.../kvstore/path"``.
    :param checkpoint_info: mapping from flattened key to stored value.
    :param switch_checkpoint_path: root path of the t5x checkpoint on disk.
    :returns: (real layer name, sub-key path, content) where content is the
        absolute file path for "kvstore/path" keys, the literal "file" for
        "kvstore/driver" keys, and the raw stored value otherwise.

    BUG FIX: the original collapsed all three parameters and every local into
    one obfuscated name; the restored names follow the upstream script.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
    """Apply `rename_keys` to a shard's state dict, rebuild the dict, and
    torch.save it to the given path."""
    # NOTE(review): both parameters share one mangled name and the key
    # expression used when repopulating `new_current_block` was lost in the
    # obfuscation — the upstream script rewrote each key here; needs restoring.
    _lowerCamelCase : str = rename_keys(_lowerCAmelCase )
    _lowerCamelCase : Dict = {}
    for k, v in current_block.items():
        _lowerCamelCase : Union[str, Any] = v
    _lowerCamelCase : str = new_current_block
    torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
    """Stream a t5x SwitchTransformers checkpoint via tensorstore and re-save it
    as size-capped PyTorch shards plus a weight-map index.

    Positional parameters (per the upstream script): switch_checkpoint_path,
    dump_path, max_shard_size, dtype, weights_name. Returns (metadata, index);
    index is None when everything fits in a single shard.
    """
    # NOTE(review): every local is assigned to the one mangled name
    # `_lowerCamelCase`, so descriptive names read below (sharded_state_dicts,
    # current_block, raw_weights, ...) are unbound as written — needs repair.
    _lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
    _lowerCamelCase : Union[str, Any] = []
    _lowerCamelCase : Dict = {}
    _lowerCamelCase : Optional[int] = 0
    _lowerCamelCase : List[str] = 0
    os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        _lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        _lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
    _lowerCamelCase : Optional[int] = {}
    # Group tensorstore spec fragments by their real layer name.
    for layer in checkpoint_info.keys():
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        if curr_real_layer_name in all_layers:
            _lowerCamelCase : Optional[int] = content
        else:
            _lowerCamelCase : int = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        _lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        _lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
        _lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        _lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            _lowerCamelCase : Tuple = os.path.join(
                _lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
            rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            _lowerCamelCase : Optional[int] = {}
            _lowerCamelCase : Dict = 0
        _lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    _lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
    rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(_lowerCAmelCase ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    _lowerCamelCase : Union[str, Any] = {}
    _lowerCamelCase : str = {}
    for idx, shard in enumerate(_lowerCAmelCase ):
        # Rename the placeholder '???' shard files now that the count is known.
        _lowerCamelCase : Dict = weights_name.replace(
            ".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
        _lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
        os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
        _lowerCamelCase : Tuple = shard
        for key in shard:
            _lowerCamelCase : str = shard_file
    # Add the metadata
    _lowerCamelCase : Optional[Any] = {"total_size": total_size}
    _lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
        _lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
        f.write(_lowerCAmelCase )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    # BUG FIX: the original bound the parser/args to unrelated obfuscated
    # names, called an undefined `shard_on_the_fly` (the sharding entry point
    # above is `A_`), and misspelled the attribute as
    # `switch_tax_checkpoint_path` (the flag declares `switch_t5x_...`).
    A_(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def A_ ( ):
    """Manual sanity check: load a converted switch-base-8 model and generate
    from a sentinel-masked prompt (needs the converted checkpoint on disk)."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    # NOTE(review): only `_lowerCamelCase` is ever assigned, yet `config`,
    # `tokenizer`, `model` and `out` are read below — mangled names.
    _lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    _lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    _lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
    _lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    _lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
    _lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 11
| 0
|
from __future__ import annotations
def lowerCamelCase_ ( _lowercase ) -> list[int]:
    """Return the prime factorisation of ``_lowercase`` in ascending order
    (with multiplicity) by trial division up to sqrt(n).

    BUG FIX: the original rebound one obfuscated name for both the divisor and
    the factor list (leaving `i`, `n` and `factors` unbound) and appended the
    argument instead of the divisor / remaining cofactor.
    """
    n = _lowercase
    divisor = 2
    factors: list[int] = []
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 1
        else:
            n //= divisor
            factors.append(divisor)
    # Whatever remains above sqrt of the original value is itself prime.
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module when executed as a script.
    doctest.testmod()
| 520
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase_ ( _lowercase ) -> Any:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _a ( nn.Module ):
    """Minimal LoRA adapter: wraps a module and adds a trainable low-rank
    (in_features -> rank -> out_features) residual path to its output."""
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
        super().__init__()
        # NOTE(review): the two parameters share one mangled name; the reads of
        # `module` / the rank below are unbound as written.
        __A : str = module
        # Two bias-free linear layers forming the low-rank bottleneck.
        __A : Union[str, Any] = nn.Sequential(
            nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , )
        # Small init for the down-projection; the up-projection starts at zero
        # so the adapter is initially a no-op.
        __A : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def __UpperCAmelCase( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
        # Wrapped module output plus the low-rank residual.
        return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
    """Shared fixtures for the 4-bit bitsandbytes tests: model name, expected
    fp16/4-bit memory ratio, prompt and accepted generations."""
    lowerCamelCase_ : Optional[Any] = """bigscience/bloom-1b7"""
    # Constant values
    # Expected ratio of fp16 to 4-bit memory footprint for bloom-1b7.
    lowerCamelCase_ : int = 2.109_6595_5269_2574
    lowerCamelCase_ : str = """Hello my name is"""
    # NOTE(review): the set is bound to a mangled class attribute, yet the
    # `EXPECTED_OUTPUTS.add(...)` calls below read that descriptive name.
    lowerCamelCase_ : Tuple = set()
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
    lowerCamelCase_ : Any = 1_0
    def __UpperCAmelCase( self ):
        """Load the tokenizer shared by every subclass."""
        # Models and tokenizer
        __A : Optional[int] = AutoTokenizer.from_pretrained(self.model_name )
class _a ( lowerCAmelCase__ ):
    """End-to-end 4-bit checks: config round-trip, memory footprint, generation
    quality, quantization-config loading and conversion guards."""
    def __UpperCAmelCase( self ):
        """Load the reference fp16 model and its 4-bit counterpart."""
        super().setUp()
        # Models and tokenizer
        __A : List[str] = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        __A : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
    def __UpperCAmelCase( self ):
        """Free both models and clear the CUDA cache between tests."""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase( self ):
        """The quantized model must expose a serializable quantization_config."""
        __A : Any = self.model_abit.config
        self.assertTrue(hasattr(__UpperCAmelCase , "quantization_config" ) )
        __A : List[str] = config.to_dict()
        __A : List[Any] = config.to_diff_dict()
        __A : str = config.to_json_string()
    def __UpperCAmelCase( self ):
        """4-bit weights shrink memory by the expected ratio and use Params4bit."""
        from bitsandbytes.nn import Paramsabit
        __A : Optional[Any] = self.model_fpaa.get_memory_footprint()
        __A : str = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        __A : Union[str, Any] = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def __UpperCAmelCase( self ):
        """Quantized Linear weights are packed as uint8 (kept-in-fp32 modules excepted)."""
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(__UpperCAmelCase , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def __UpperCAmelCase( self ):
        """Quantized generation must match one of the accepted completions."""
        __A : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
        __A : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
    def __UpperCAmelCase( self ):
        """Loading through an explicit BitsAndBytesConfig behaves the same."""
        __A : Optional[Any] = BitsAndBytesConfig()
        __A : Tuple = True
        __A : Optional[int] = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=__UpperCAmelCase , device_map="auto" )
        __A : Any = self.tokenizer(self.input_text , return_tensors="pt" )
        __A : List[Any] = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
    def __UpperCAmelCase( self ):
        """Serializing a 4-bit model is unsupported and must raise."""
        with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(__UpperCAmelCase )
    def __UpperCAmelCase( self ):
        """Passing both a quantization_config and load_in_4bit kwargs must raise."""
        __A : str = BitsAndBytesConfig()
        with self.assertRaises(__UpperCAmelCase ):
            __A : List[Any] = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map="auto" , bnb_abit_quant_type="nf4" , )
    def __UpperCAmelCase( self ):
        """Device/dtype conversion of a quantized model must raise; the fp16
        model remains freely convertible."""
        with self.assertRaises(__UpperCAmelCase ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(__UpperCAmelCase ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(__UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(__UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(__UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        __A : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" )
        __A : List[Any] = self.model_fpaa.to(torch.floataa )
        __A : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        __A : Tuple = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        __A : int = self.model_fpaa.half()
        # Check this does not throw an error
        __A : Tuple = self.model_fpaa.float()
    def __UpperCAmelCase( self ):
        """T5 4-bit load keeps the keep-in-fp32 module (wo) in float32."""
        __A : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__UpperCAmelCase , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
    """T5 4-bit tests exercising the _keep_in_fp32_modules mechanism for both
    dense-relu-dense (t5-small) and dense-act (flan-t5-small) variants."""
    @classmethod
    def __UpperCAmelCase( cls ):
        """Shared fixtures: model names, tokenizer and prompt."""
        __A : List[Any] = "t5-small"
        __A : Any = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        __A : Union[str, Any] = AutoTokenizer.from_pretrained(cls.model_name )
        __A : str = "Translate in German: Hello, my dog is cute"
    def __UpperCAmelCase( self ):
        """Clear the CUDA cache between tests."""
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase( self ):
        """Generation still works with _keep_in_fp32_modules disabled (None)."""
        from transformers import TaForConditionalGeneration
        __A : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
        __A : Union[str, Any] = None
        # test with `t5-small`
        __A : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        __A : Optional[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __A : str = model.generate(**__UpperCAmelCase )
        # test with `flan-t5-small`
        __A : Any = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        __A : List[str] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __A : Optional[int] = model.generate(**__UpperCAmelCase )
        # Restore the class attribute saved above.
        __A : List[Any] = modules
    def __UpperCAmelCase( self ):
        """Default 4-bit load: decoder attention projections are Linear4bit."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        __A : Dict = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        __A : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __A : Union[str, Any] = model.generate(**__UpperCAmelCase )
        # test with `flan-t5-small`
        __A : int = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        __A : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        __A : int = model.generate(**__UpperCAmelCase )
class _a ( lowerCAmelCase__ ):
    """Model-class coverage: base, sequence-classification, causal-LM and
    seq2seq heads all load in 4-bit; only the backbone is quantized."""
    def __UpperCAmelCase( self ):
        """Load one model per head type in 4-bit."""
        super().setUp()
        # model_name
        __A : Any = "bigscience/bloom-560m"
        __A : Tuple = "t5-small"
        # Different types of model
        __A : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        # Sequence classification model
        __A : Any = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        # CausalLM model
        __A : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
        # Seq2seq model
        __A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
    def __UpperCAmelCase( self ):
        """Free all loaded models and clear the CUDA cache."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase( self ):
        """Backbone linears are Params4bit; task heads remain nn.Parameter."""
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _a ( lowerCAmelCase__ ):
    """4-bit loading through the `pipeline` text-generation API."""
    def __UpperCAmelCase( self ):
        super().setUp()
    def __UpperCAmelCase( self ):
        """Drop the pipeline and clear the CUDA cache."""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase( self ):
        """A pipeline created with load_in_4bit model_kwargs generates one of the
        accepted completions."""
        __A : List[Any] = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        __A : Tuple = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _a ( lowerCAmelCase__ ):
    """4-bit loading with a balanced device map across multiple GPUs."""
    def __UpperCAmelCase( self ):
        super().setUp()
    def __UpperCAmelCase( self ):
        """Model must shard over GPUs {0, 1} and still generate an accepted output."""
        __A : str = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=__UpperCAmelCase , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        __A : str = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        __A : Dict = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class _a ( lowerCAmelCase__ ):
    """Smoke-test 4-bit training: freeze the base model, attach LoRA adapters to
    OPT attention projections, and check that gradients reach the adapters."""
    def __UpperCAmelCase( self ):
        __A : List[str] = "facebook/opt-350m"
        super().setUp()
    def __UpperCAmelCase( self ):
        """Freeze → adapt → forward/backward → assert adapter grads are non-zero."""
        # Training with 4-bit needs bitsandbytes >= 0.37.0.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        __A : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            __A : Any = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                __A : List[Any] = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(__UpperCAmelCase ) ):
                __A : int = LoRALayer(module.q_proj , rank=16 )
                __A : List[str] = LoRALayer(module.k_proj , rank=16 )
                __A : str = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        __A : int = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            __A : str = model.forward(**__UpperCAmelCase )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(__UpperCAmelCase , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class _a ( lowerCAmelCase__ ):
    """Re-run the 4-bit suite against gpt2-xl with its own expected memory ratio."""
    lowerCamelCase_ : List[str] = """gpt2-xl"""
    # Expected fp16/4-bit memory-footprint ratio for gpt2-xl.
    lowerCamelCase_ : Dict = 3.3191_8548_5415_2187
| 520
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ , UpperCamelCase_ ):
    """Configuration for the MaskFormer Swin backbone (model_type
    'maskformer-swin'); mirrors SwinConfig plus the backbone mixin features."""
    lowercase_ = 'maskformer-swin'
    # Map standard config attribute names onto Swin's internal names.
    lowercase_ = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self : Optional[Any] , a_ : List[str]=224 , a_ : Tuple=4 , a_ : List[Any]=3 , a_ : Optional[int]=96 , a_ : Any=[2, 2, 6, 2] , a_ : int=[3, 6, 12, 24] , a_ : Any=7 , a_ : Any=4.0 , a_ : Optional[int]=True , a_ : Optional[Any]=0.0 , a_ : Optional[Any]=0.0 , a_ : Optional[int]=0.1 , a_ : Union[str, Any]="gelu" , a_ : Optional[Any]=False , a_ : Any=0.02 , a_ : Optional[int]=1e-5 , a_ : List[Any]=None , a_ : Dict=None , **a_ : Any , )-> Any:
        """Store the Swin hyper-parameters; see SwinConfig for each field's meaning.

        NOTE(review): all parameters share one mangled name `a_`, yet the
        assignments below read descriptive names (image_size, patch_size, ...)
        — the signature needs restoring before this can run.
        """
        super().__init__(**a_ )
        SCREAMING_SNAKE_CASE__ : str = image_size
        SCREAMING_SNAKE_CASE__ : List[Any] = patch_size
        SCREAMING_SNAKE_CASE__ : str = num_channels
        SCREAMING_SNAKE_CASE__ : Dict = embed_dim
        SCREAMING_SNAKE_CASE__ : List[str] = depths
        SCREAMING_SNAKE_CASE__ : int = len(a_ )
        SCREAMING_SNAKE_CASE__ : Dict = num_heads
        SCREAMING_SNAKE_CASE__ : Optional[Any] = window_size
        SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio
        SCREAMING_SNAKE_CASE__ : List[Any] = qkv_bias
        SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = drop_path_rate
        SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_absolute_embeddings
        SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
        SCREAMING_SNAKE_CASE__ : int = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        SCREAMING_SNAKE_CASE__ : Dict = int(embed_dim * 2 ** (len(a_ ) - 1) )
        SCREAMING_SNAKE_CASE__ : Tuple = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(a_ ) + 1 )]
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_aligned_output_features_output_indices(
            out_features=a_ , out_indices=a_ , stage_names=self.stage_names )
| 636
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a(method):
    """Wrap *method* so accelerate's ``_hf_hook.pre_forward`` runs first.

    Returns *method* unchanged when accelerate is unavailable or older than
    0.17.0 (those versions call ``pre_forward`` themselves).

    Fixes: the original compared ``version.parse(lowercase__)`` — i.e. parsed
    the *method object* — instead of the accelerate version string it had just
    computed, and the body read the name ``method`` while the parameter was
    named ``lowercase__``.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        # Fire the accelerate offload hook (if installed) before the real call.
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 636
| 1
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of *input_str* is a palindrome.

    Spaces are ignored and the comparison is case-insensitive; a string can be
    rearranged into a palindrome iff at most one character has an odd count.

    Fixes: the original's parameter was named ``_UpperCamelCase`` while the
    body read ``input_str`` (NameError); the def name is restored to the one
    the ``__main__`` block and ``benchmark`` actually call.
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Return True if some permutation of *input_str* is a palindrome.

    Manual frequency-dict variant of the ``Counter`` implementation above:
    at most one character may occur an odd number of times. Spaces are
    ignored and the check is case-insensitive.

    Fixes: restored the parameter name (``input_str``) and the local names
    (``lower_case_input_str``, ``character_freq_dict``, ``odd_char``) that the
    original assigned to throwaway placeholders while still reading the
    original names (NameError).
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Print the answer and the `timeit` timing of both palindrome checks.

    The timeit statements pull ``check_str`` from ``__main__`` — this function
    is only meaningful when run via the ``__main__`` block below, which sets
    that global.

    Fixes: restored the def name (called as ``benchmark`` from ``__main__``)
    and the parameter name ``input_str`` that the body reads.
    """
    print('\nFor string = ', input_str, ':')
    print(
        '> can_string_be_rearranged_as_palindrome_counter()',
        '\tans =',
        can_string_be_rearranged_as_palindrome_counter(input_str),
        '\ttime =',
        timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)',
            setup='import __main__ as z',
        ),
        'seconds',
    )
    print(
        '> can_string_be_rearranged_as_palindrome()',
        '\tans =',
        can_string_be_rearranged_as_palindrome(input_str),
        '\ttime =',
        timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)',
            setup='import __main__ as z',
        ),
        'seconds',
    )
if __name__ == "__main__":
    # Read a string from the user, benchmark both implementations, then report
    # the verdict. Fixes: the original assigned both the input and the verdict
    # to the same placeholder name while reading `check_str`/`status`.
    check_str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 639
|
import doctest
from collections import deque
import numpy as np
class lowerCamelCase_:
    """Circular convolution of two 1-D signals via the circulant-matrix method."""

    def __init__(self) -> None:
        # Example signals; the original assigned both lists to a throwaway
        # local while the compute method reads self.first_signal/second_signal.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def A(self) -> list[float]:
        """Return the circular convolution of the two signals, rounded to 2 dp.

        Builds the circulant matrix of ``second_signal`` (each row is the
        signal rotated right by the row index) and multiplies its transpose by
        ``first_signal``. Fixes: the original read ``snake_case_`` everywhere
        a computed local was needed (NameError); names restored from the
        intact read sites.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    doctest.testmod()
| 639
| 1
|
def _UpperCAmelCase(numerator: int = 1, digit: int = 1_000) -> int:
    """Project Euler 26: value of d < *digit* whose 1/d has the longest
    recurring decimal cycle, starting the search at *numerator*.

    Tracks the sequence of long-division remainders; when a remainder repeats,
    the list length is the cycle length for that divisor.

    Fixes: the original signature declared the same parameter name twice
    (a SyntaxError) and assigned every local to one placeholder while reading
    ``the_digit``/``longest_list_length``/``has_been_divided``/``now_divide``.
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # Remainder repeats: cycle found; keep the longest one so far.
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
# Tests
if __name__ == "__main__":
    # Run the embedded doctests when this file is executed as a script.
    import doctest

    doctest.testmod()
| 620
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the last *num_runs* scheduled runs of the daily-CI workflow on
    ``main`` from the GitHub API, newest first.

    Fixes: the original declared both parameters with the same placeholder
    name (a SyntaxError) and assigned ``headers``/``workflow_id``/``url``/
    ``result`` to placeholders while reading the real names. The def name is
    restored per the sibling call site below.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily-CI workflow run,
    or None if no completed run is found.

    Fixes: restored the parameter name and the locals (``workflow_runs``,
    ``workflow_run_id``) that the original assigned to a placeholder while
    reading the real names; def name restored per the sibling call site.
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs come newest-first, so the first completed one is the latest.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the latest completed daily-CI run
    into *output_dir*; artifacts not present on the run are skipped.

    Fixes: restored parameter and local names (the original assigned
    ``artifacts_links``/``artifact_url`` to placeholders while reading them).
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: keyword kept as `worflow_run_id` (sic) — that is the parameter
        # name exposed by get_artifacts_links upstream.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the latest daily-CI artifacts and return their file contents.

    Returns a dict mapping ``artifact_name -> {filename: decoded text}`` for
    every requested artifact whose zip was actually downloaded.

    Fixes: restored parameter and local names (``results``,
    ``artifact_zip_path``) that the original assigned to placeholders while
    reading the real names.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
| 620
| 1
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Return an index of *item* in the ascending *sorted_collection*, or
    None when absent. Probes at the linearly-interpolated position instead of
    the midpoint.

    Fixes: the original declared both parameters with the same name
    (a SyntaxError) and assigned ``left``/``right``/``point``/``current_item``
    to a single placeholder while reading the real names. The def name is
    restored per the ``__main__`` call site below.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search over ``sorted_collection[left:right+1]``;
    returns an index of *item* or None.

    Fixes: the original declared four parameters all named ``a`` (a
    SyntaxError) and assigned ``point`` to a placeholder while reading it; the
    def name is restored per the intact recursive call sites in the body.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError unless *collection* is in ascending order; else True.

    Fixes: the original's parameter was named ``a`` while the comparison read
    ``collection`` (NameError); the def name is restored per the ``__main__``
    call site below.
    """
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
if __name__ == "__main__":
    import sys

    # Demo driver. Fixes: the original bound everything to one placeholder
    # name while reading `debug`/`collection`/`target` (NameError), and only
    # defined the collection inside the debug branch even though the search
    # below always needs it.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'''{target} found at positions: {result}''')
    else:
        print("""Not found""")
| 356
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_lowercase = pytest.mark.integration
@require_faiss
class a_(TestCase):
    """Dataset faiss/elasticsearch index integration tests.

    NOTE(review): restored from obfuscated source — the base class token was
    undefined (restored to ``TestCase`` per the otherwise-unused import), all
    methods shared one name (later defs shadowed earlier ones), and locals
    were assigned to placeholders while read under their real names. Method
    names other than ``_create_dummy_dataset`` (grounded by its call sites)
    follow unittest ``test_*`` convention — confirm against upstream.
    """

    def _create_dummy_dataset(self):
        # 30 rows named my_name-train_0 .. my_name-train_29.
        dset = Dataset.from_dict(
            {'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]}
        )
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)},
            with_indices=True,
            keep_in_memory=True,
        )
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs'
        )
        dset.drop_index('vecs')
        # MissingIndex restored per the otherwise-unused import above.
        self.assertRaises(
            MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32))
        )

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class a_(TestCase):
    """FaissIndex unit tests.

    NOTE(review): restored from obfuscated source — the base was restored to
    ``TestCase`` (per the otherwise-unused import), shadowing duplicate method
    names were given conventional ``test_*`` names, and placeholder-assigned
    locals were renamed per their intact read sites.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def lowerCamelCase__(mockfs):
    """Round-trip a FaissIndex through a (mock) fsspec filesystem and verify
    a search on the reloaded index.

    Fixes: the original's parameter was a placeholder while the body read
    ``mockfs``; locals restored per their read sites (the ``query[1] = 1``
    line had been collapsed into a plain placeholder assignment).
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class a_(TestCase):
    """ElasticSearchIndex unit test (fully mocked elasticsearch client).

    NOTE(review): restored from obfuscated source — base restored to
    ``TestCase``; placeholder-assigned locals and the mock ``return_value``
    assignments were renamed per their intact read sites.
    """

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 356
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __lowerCAmelCase( UpperCamelCase_ ):
    # NOTE(review): obfuscated pipeline-output dataclass. Both fields share
    # the name ``__snake_case`` (the second silently replaces the first) and
    # the ``4_2`` values look like placeholders for real annotated fields —
    # upstream StableDiffusionPipelineOutput declares ``images`` and
    # ``nsfw_content_detected``; confirm against the original source.
    __snake_case : Tuple = 4_2
    __snake_case : Tuple = 4_2
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class __lowerCAmelCase( UpperCamelCase_ ):
        # NOTE(review): obfuscated Flax pipeline-output dataclass — both
        # fields share one name and ``4_2`` is a placeholder value; upstream
        # FlaxStableDiffusionPipelineOutput declares ``images`` and
        # ``nsfw_content_detected``; confirm against the original source.
        __snake_case : Union[str, Any] = 4_2
        __snake_case : int = 4_2
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 717
|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
# Module logger — the class below reads the name `logger`, but the original
# bound this to an obfuscated placeholder name.
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R'\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    ',
)
class __lowerCAmelCase(Pipeline):
    """Masked-language-modelling ("fill-mask") pipeline.

    NOTE(review): restored from obfuscated source in which every method was
    named ``_lowercase`` (later defs shadowed earlier ones) and every local
    assignment targeted a placeholder while being read under its real name.
    Method names are grounded by the intact ``self.<name>(...)`` call sites
    and by the base ``Pipeline`` hook contract
    (``preprocess``/``_forward``/``postprocess``/``_sanitize_parameters``);
    the base class and first decorator argument are grounded by the
    otherwise-unused ``Pipeline`` / ``PIPELINE_INIT_ARGS`` imports.
    """

    def get_masked_index(self, input_ids: GenericTensor):
        """Return the positions of the mask token in `input_ids` (tf or pt)."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            # as_tuple=False restored from upstream — the obfuscated value was lost; confirm.
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('Unsupported framework')
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        """Raise PipelineException if `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                'fill-mask',
                self.model.base_model_prefix,
                f'No mask_token ({self.tokenizer.mask_token}) found on the input',
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate the mask token on every sequence of `model_inputs`."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        """Tokenize `inputs` and verify each sequence carries a mask token."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model, keeping input_ids alongside for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs['input_ids']
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn model logits into the top-k {score, token, token_str, sequence}
        dicts per mask position; a single-mask input returns a flat list."""
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(
                input_ids == self.tokenizer.mask_token_id, as_tuple=False
            ).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                # Write the predicted token into the i-th mask slot.
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {
                    'score': v,
                    'token': p,
                    'token_str': self.tokenizer.decode([p]),
                    'sequence': sequence,
                }
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map target words to vocab ids, tokenizing (with a warning) any
        target not found verbatim in the vocabulary."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )['input_ids']
                if len(input_ids) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        'We cannot replace it with anything meaningful, ignoring it'
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.'
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('At least one target must be provided when passed.')
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call kwargs into (preprocess, forward, postprocess) params."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, 'The tokenizer does not define a `mask_token`.'
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Run the pipeline; a single-string input returns a single result."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
| 233
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints into their fast (tokenizers-backed) format.

    Args:
        tokenizer_name: name of a tokenizer class from ``TOKENIZER_CLASSES``,
            or ``None`` to convert all of them.
        checkpoint_name: a single checkpoint identifier, or ``None`` to use
            every canonical checkpoint of each tokenizer class.
        dump_path: directory where the converted tokenizer files are written.
        force_download: re-download checkpoints even if they are cached.

    Raises:
        ValueError: if ``tokenizer_name`` is not a known tokenizer class.
    """
    # Fix for the corrupted original: every local was bound to the mangled
    # name ``lowerCamelCase_`` while subsequent lines read the real names.
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer (the slow->fast conversion happens inside
            # from_pretrained of the fast class).
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"=> File names {file_names}")
            # Keep only the unified tokenizer.json artifact.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


# Backward-compatible public alias: the __main__ block below calls this name.
convert_slow_checkpoint_to_fast = lowerCamelCase_
if __name__ == "__main__":
    # Fix for the corrupted original: the parser and parsed args were bound
    # to `__A` but read back as `parser` / `args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 463
|
from math import isqrt
def calculate_prime_numbers(max_number):
    """Return all primes strictly below ``max_number`` using a sieve of Eratosthenes."""
    # Fix for the corrupted original: the sieve's stop/step arguments and the
    # cross-off assignment read an undefined mangled name.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Multiples of i starting at i*i are composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number=10**8):
    """Project Euler 187: count composites below ``max_number`` with exactly two
    (not necessarily distinct) prime factors, via a two-pointer sweep.

    Only primes below ``max_number // 2`` can appear as the smaller factor, so
    the sieve is limited to that range.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # Shrink the window until primes[left] * primes[right] < max_number;
        # then every prime between left and right pairs with primes[left].
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


# Backward-compatible alias for the original (obfuscated) name, which was
# last bound to the solution function.
lowerCamelCase_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 463
| 1
|
"""simple docstring"""
# NOTE(review): every constant in this section is bound to the same mangled
# name `__lowerCamelCase`, so each assignment clobbers the previous one and
# only the last frozenset is reachable at runtime. The sets look like the
# per-task pipeline parameter/batch-parameter constants from diffusers'
# `pipeline_params.py` (TEXT_TO_IMAGE_PARAMS, IMAGE_VARIATION_PARAMS, ...) —
# TODO confirm against upstream and restore the distinct names before use.
# Presumably: text-to-image call parameters.
__lowerCamelCase = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
# Presumably: text-to-image batch parameters.
__lowerCamelCase = frozenset(["prompt", "negative_prompt"])
# Presumably: text-to-image image parameters (none).
__lowerCamelCase = frozenset([])
# Presumably: image-to-image image parameters.
__lowerCamelCase = frozenset(["image"])
# Presumably: image-variation call parameters.
__lowerCamelCase = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
# Presumably: image-variation batch parameters.
__lowerCamelCase = frozenset(["image"])
# Presumably: text-guided image-variation call parameters.
__lowerCamelCase = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
# Presumably: text-guided image-variation batch parameters.
__lowerCamelCase = frozenset(["prompt", "image", "negative_prompt"])
# Presumably: text-guided inpainting call parameters.
__lowerCamelCase = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
# Presumably: text-guided inpainting batch parameters.
__lowerCamelCase = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
# Presumably: image-only inpainting call parameters.
__lowerCamelCase = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
# Presumably: image-only inpainting batch parameters.
__lowerCamelCase = frozenset(["image", "mask_image"])
# Presumably: example-image-guided inpainting call parameters.
__lowerCamelCase = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
# Presumably: example-image-guided inpainting batch parameters.
__lowerCamelCase = frozenset(["example_image", "image", "mask_image"])
# Presumably: class-conditioned generation call/batch parameters.
__lowerCamelCase = frozenset(["class_labels"])
__lowerCamelCase = frozenset(["class_labels"])
# Presumably: unconditional image generation call/batch parameters.
__lowerCamelCase = frozenset(["batch_size"])
__lowerCamelCase = frozenset([])
# Presumably: unconditional audio generation call/batch parameters.
__lowerCamelCase = frozenset(["batch_size"])
__lowerCamelCase = frozenset([])
# Presumably: text-to-audio call parameters.
__lowerCamelCase = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
# Presumably: text-to-audio batch parameters.
__lowerCamelCase = frozenset(["prompt", "negative_prompt"])
# Presumably: tokens-to-audio call/batch parameters.
__lowerCamelCase = frozenset(["input_tokens"])
__lowerCamelCase = frozenset(["input_tokens"])
| 190
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def lowercase(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads dataset_size from either the
    README.md YAML header or the legacy dataset_infos.json file.

    Fix for the corrupted original: the signature declared the same parameter
    name twice (SyntaxError) while the body read ``files`` and
    ``tmp_path_factory``.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def lowercase(tmp_path, dataset_info):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    Fix for the corrupted original: duplicate parameter names (SyntaxError)
    and locals bound to a mangled name while the body read the real names.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def lowercase():
    """_to_yaml_dict exposes exactly the YAML-included fields, each with a
    YAML-serializable value, and the result survives a yaml dump/load cycle.

    Fix for the corrupted original: locals were bound to ``__magic_name__``
    while later lines read the real names (NameError).
    """
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def lowercase():
    """An empty DatasetInfo serializes to an empty YAML dict.

    Fix for the corrupted original: locals were bound to ``__magic_name__``
    while the assertion read the real names (NameError).
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def lowercase(tmp_path, dataset_infos_dict):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    Fix for the corrupted original: duplicate parameter names (SyntaxError)
    and mangled local bindings.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 190
| 1
|
'''simple docstring'''
# NOTE(review): every constant in this section is bound to the same mangled
# name `SCREAMING_SNAKE_CASE__`, so each assignment clobbers the previous one
# and only the final list is reachable at runtime. The lists look like fixed
# timestep schedules (descending from 999 to 0 at varying densities), likely
# test fixtures for diffusion schedulers — TODO confirm against upstream and
# restore the distinct constant names before use.
# Coarse ~25-step schedule.
SCREAMING_SNAKE_CASE__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
# Alternate ~25-step schedule with a different spacing pattern.
SCREAMING_SNAKE_CASE__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
# ~50-step schedule, denser near both ends.
SCREAMING_SNAKE_CASE__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
# ~100-step schedule, densest at the high-timestep end.
SCREAMING_SNAKE_CASE__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
# Finest schedule (~185 steps), near-unit spacing at the top.
SCREAMING_SNAKE_CASE__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
# ~25-step schedule, dense only above 900.
SCREAMING_SNAKE_CASE__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
# ~40-step schedule.
SCREAMING_SNAKE_CASE__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
# ~100-step schedule with piecewise-varying density.
SCREAMING_SNAKE_CASE__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 267
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
# Tokenizer test suite for the MVP model (mixin-based, slow + fast tokenizers).
# NOTE(review): every method below is named `snake_case__`, so in the class
# body each definition clobbers the previous one and only the LAST method
# survives at runtime — the original (descriptive, `test_*`) method names were
# mangled and unittest would not collect these tests as written. Restore the
# upstream names before relying on this class.
@require_tokenizers
class lowerCamelCase__ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
# Mixin configuration: slow class, fast class, whether a rust tokenizer
# exists, and the pretrained-checkpoint filter used by the mixin.
_UpperCamelCase : List[str] = MvpTokenizer
_UpperCamelCase : Any = MvpTokenizerFast
_UpperCamelCase : List[str] = True
_UpperCamelCase : Dict = filter_roberta_detectors
# setUp: writes a tiny BPE vocab/merges pair to tmpdirname for the mixin.
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ = dict(zip(snake_case , range(len(snake_case ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case ) )
# Factory: slow tokenizer from the temp dir (mixin hook).
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
# Factory: fast (rust) tokenizer from the temp dir (mixin hook).
def snake_case__ ( self , **snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
# Mixin hook: input/output text pair for round-trip tests.
def snake_case__ ( self , snake_case ):
'''simple docstring'''
return "lower newer", "lower newer"
# Pretrained slow tokenizer (network checkpoint), cached per test class.
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
# Pretrained fast tokenizer, cached per test class.
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
# Batch encoding: shapes and expected token ids for two inputs.
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
# Test that special tokens are reset
# Encoding without targets must not produce labels/decoder masks.
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , padding=snake_case , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , snake_case )
self.assertIn("attention_mask" , snake_case )
self.assertNotIn("labels" , snake_case )
self.assertNotIn("decoder_attention_mask" , snake_case )
# text_target with max_length padding yields fixed-width target ids.
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(text_target=snake_case , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
# Over-long input is truncated to the model max length (1024).
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=snake_case , truncation=snake_case , return_tensors="pt" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
# Source + target encoding: both sequences carry BOS/EOS special tokens.
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ["A long paragraph for summarization."]
UpperCamelCase__ = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ = tokenizer(snake_case , text_target=snake_case , return_tensors="pt" )
UpperCamelCase__ = inputs["input_ids"]
UpperCamelCase__ = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
# Intentionally disabled mixin test.
def snake_case__ ( self ):
'''simple docstring'''
pass
# Slow vs fast tokenizers agree on a sentence containing a <mask> token.
def snake_case__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
UpperCamelCase__ = "A, <mask> AllenNLP sentence."
UpperCamelCase__ = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
UpperCamelCase__ = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 551
| 0
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__a = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, first removing any stale checkpoint files.

    If ``dirpath`` already exists, a pre-existing ``config.json`` /
    ``pytorch_model.bin`` is deleted so ``save_pretrained`` writes a clean
    checkpoint; otherwise the directory is created.

    Fix for the corrupted original: the signature declared the same parameter
    name twice (SyntaxError).
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


# Keep the original (obfuscated) name bound for backward compatibility.
lowerCamelCase__ = save_model
def entropy(p, unlogit=False):
    """Shannon entropy of a distribution along the last dimension.

    Args:
        p: tensor of (pseudo-)probabilities.
        unlogit: if True, square ``p`` first (used to turn raw attention
            weights into an unnormalized distribution before measuring).

    Returns:
        ``-sum(p * log(p))`` over the last dim, with ``0 * log(0)`` treated as 0.
    """
    # Fix for the corrupted original: locals were bound to a mangled name and
    # the zero-mask subscript (``plogp[p == 0] = 0``) was lost, which would
    # otherwise propagate NaNs from log(0).
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


# Keep the original (obfuscated) name bound for backward compatibility.
lowerCamelCase__ = entropy
def print_ad_tensor(tensor):
    """Log a 2D tensor (layers x heads) as tab-separated rows via ``logger``.

    Floating tensors are printed with 5 decimals, integer (long) tensors as
    plain integers. Named ``print_ad_tensor`` to match the call sites below.

    Fix for the corrupted original: the parameter was named ``_lowercase``
    while the body read ``tensor`` (NameError).
    """
    # Header row: 1-based head indices.
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))


# Keep the original (obfuscated) name bound for backward compatibility.
lowerCamelCase__ = print_ad_tensor
# Computes per-head attention entropy and gradient-based head-importance
# scores over an eval dataloader (BERTology-style head analysis for GPT-2).
# NOTE(review): this function is corrupted — the signature repeats the
# parameter name `_lowercase` (SyntaxError) and every local is bound to
# `UpperCAmelCase_` while subsequent lines read the real names
# (`head_importance`, `attn_entropy`, `loss`, `outputs`, `tot_tokens`, ...),
# so it cannot run as written. Restore the original locals from upstream
# (transformers' bertology example) before use; comments below describe the
# intended flow.
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase=True , _lowercase=None , _lowercase=False ):
'''simple docstring'''
# Layer/head counts from the model config; zero-initialized accumulators
# for importance and entropy live on args.device.
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ : Tuple = torch.zeros(_lowercase , _lowercase ).to(args.device )
UpperCAmelCase_ : List[Any] = torch.zeros(_lowercase , _lowercase ).to(args.device )
# Default head mask is all-ones and requires grad so we can read d(loss)/d(mask).
if head_mask is None:
UpperCAmelCase_ : Dict = torch.ones(_lowercase , _lowercase ).to(args.device )
head_mask.requires_grad_(requires_grad=_lowercase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = 0.0
UpperCAmelCase_ : List[Any] = 0.0
for step, inputs in enumerate(tqdm(_lowercase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ : Optional[Any] = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_), ) : Union[str, Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
# Language-modeling loss: labels are the inputs themselves.
UpperCAmelCase_ : Dict = model(_lowercase , labels=_lowercase , head_mask=_lowercase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Tuple = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
# Accumulate per-layer attention entropy over batch/head/position dims.
if compute_entropy:
for layer, attn in enumerate(_lowercase ):
UpperCAmelCase_ : Union[str, Any] = entropy(attn.detach() , _lowercase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
# Importance of a head = |d(loss)/d(mask entry)| accumulated over batches.
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowercase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
# L2-normalize importance within each layer (epsilon avoids div-by-zero).
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : int = torch.pow(torch.pow(_lowercase , _lowercase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
# Min-max rescale all scores into [0, 1].
UpperCAmelCase_ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(_lowercase )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(_lowercase )
logger.info('''Head ranked by importance scores''' )
# Rank heads by importance (the rank-assignment subscript appears to have
# been lost in the mangling — upstream writes into head_ranks via a sorted
# index; restore before use).
UpperCAmelCase_ : int = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ : Optional[Any] = head_ranks.view_as(_lowercase )
print_ad_tensor(_lowercase )
return attn_entropy, head_importance, total_loss
# Iteratively masks the least-important attention heads while the LM score
# (1/loss) stays above `masking_threshold` * original score; saves the final
# mask to `<output_dir>/head_mask.npy` and returns it.
# NOTE(review): corrupted like the function above — duplicate `_lowercase`
# parameters (SyntaxError) and locals bound to `UpperCAmelCase_` while reads
# use the real names (`loss`, `original_score`, `new_head_mask`,
# `current_heads_to_mask`, ...). Restore from upstream before use.
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
# Baseline importance/score with no heads masked.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : List[Any] = compute_heads_importance(_lowercase , _lowercase , _lowercase , compute_entropy=_lowercase )
UpperCAmelCase_ : Union[str, Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , _lowercase , original_score * args.masking_threshold )
UpperCAmelCase_ : Union[str, Any] = torch.ones_like(_lowercase )
# Number of heads masked per iteration (at least 1).
UpperCAmelCase_ : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ : int = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ : Any = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
# Already-masked heads get +inf importance so they sort last; the
# mangled line below lost the subscript that applied it.
UpperCAmelCase_ : Any = float('''Inf''' )
UpperCAmelCase_ : List[str] = head_importance.view(-1 ).sort()[1]
if len(_lowercase ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
UpperCAmelCase_ : Optional[Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ : Union[str, Any] = new_head_mask.view(-1 )
# Zero out the selected entries, then restore the 2D (layers x heads) shape.
UpperCAmelCase_ : List[str] = 0.0
UpperCAmelCase_ : Tuple = new_head_mask.view_as(_lowercase )
UpperCAmelCase_ : Optional[int] = new_head_mask.clone().detach()
print_ad_tensor(_lowercase )
# Compute metric and head importance again
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = compute_heads_importance(
_lowercase , _lowercase , _lowercase , compute_entropy=_lowercase , head_mask=_lowercase )
UpperCAmelCase_ : Optional[int] = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(_lowercase )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
# Physically prunes the heads selected by the mask, then compares score,
# parameter count and wall-clock time before vs after pruning, and saves the
# pruned model.
# NOTE(review): corrupted like the functions above — duplicate `_lowercase`
# parameters (SyntaxError) and mangled local bindings vs reads
# (`before_time`, `loss`, `heads_to_prune`, `original_num_params`, ...).
# Restore from upstream before use.
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
# Score with the mask merely applied (no structural pruning yet).
UpperCAmelCase_ : Optional[int] = datetime.now()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = compute_heads_importance(
_lowercase , _lowercase , _lowercase , compute_entropy=_lowercase , compute_importance=_lowercase , head_mask=_lowercase )
UpperCAmelCase_ : List[str] = 1 / loss
UpperCAmelCase_ : int = datetime.now() - before_time
UpperCAmelCase_ : List[str] = sum(p.numel() for p in model.parameters() )
# Map layer -> indices of heads whose mask entry is 0 (to be pruned).
UpperCAmelCase_ : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowercase ) )
}
# A single pruned head comes back as a bare int; normalize to a list.
for k, v in heads_to_prune.items():
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Tuple = [
v,
]
assert sum(len(_lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowercase )
UpperCAmelCase_ : str = sum(p.numel() for p in model.parameters() )
# Score again with heads actually removed (head_mask=None path).
UpperCAmelCase_ : Tuple = datetime.now()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : int = compute_heads_importance(
_lowercase , _lowercase , _lowercase , compute_entropy=_lowercase , compute_importance=_lowercase , head_mask=_lowercase , actually_pruned=_lowercase , )
UpperCAmelCase_ : Dict = 1 / loss
UpperCAmelCase_ : List[str] = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _lowercase , _lowercase , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _lowercase , _lowercase )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(_lowercase , args.output_dir )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=_lowercase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=_lowercase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=_lowercase , type=_lowercase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=_lowercase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=_lowercase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=_lowercase , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=_lowercase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_lowercase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=_lowercase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=_lowercase , default=42 )
parser.add_argument('''--local_rank''' , type=_lowercase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=_lowercase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_lowercase , default='''''' , help='''Can be used for distant debugging.''' )
UpperCAmelCase_ : Tuple = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowercase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ : Optional[Any] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
UpperCAmelCase_ : Union[str, Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ : int = torch.device('''cuda''' , args.local_rank )
UpperCAmelCase_ : Any = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ : int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ : Tuple = nn.parallel.DistributedDataParallel(
_lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowercase )
elif args.n_gpu > 1:
UpperCAmelCase_ : int = nn.DataParallel(_lowercase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowercase )
torch.save(_lowercase , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , _lowercase )
# Prepare dataset
UpperCAmelCase_ : Dict = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ : List[Any] = (torch.from_numpy(_lowercase ),)
UpperCAmelCase_ : int = TensorDataset(*_lowercase )
UpperCAmelCase_ : Any = RandomSampler(_lowercase )
UpperCAmelCase_ : Tuple = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowercase , _lowercase , _lowercase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ : List[Any] = mask_heads(_lowercase , _lowercase , _lowercase )
prune_heads(_lowercase , _lowercase , _lowercase , _lowercase )
# Script entry point: parse CLI arguments and run the head-importance analysis defined above.
if __name__ == "__main__":
    main()
| 300
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Hosted vocabulary files for each pretrained ConvBERT checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

# Maximum input lengths supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

# Per-checkpoint tokenizer construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ConvBertTokenizer
def __init__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE="[UNK]" ,_SCREAMING_SNAKE_CASE="[SEP]" ,_SCREAMING_SNAKE_CASE="[PAD]" ,_SCREAMING_SNAKE_CASE="[CLS]" ,_SCREAMING_SNAKE_CASE="[MASK]" ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Tuple:
super().__init__(
_SCREAMING_SNAKE_CASE ,tokenizer_file=_SCREAMING_SNAKE_CASE ,do_lower_case=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,tokenize_chinese_chars=_SCREAMING_SNAKE_CASE ,strip_accents=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,_SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,_SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,_SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE ,normalizer_state.pop('''type''' ) )
UpperCAmelCase_ : int = do_lower_case
UpperCAmelCase_ : Dict = strip_accents
UpperCAmelCase_ : Optional[Any] = tokenize_chinese_chars
UpperCAmelCase_ : Union[str, Any] = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = do_lower_case
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Dict:
UpperCAmelCase_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
UpperCAmelCase_ : Tuple = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE ,name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 300
| 1
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __lowerCAmelCase(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge LoRA weights from a ``.safetensors`` checkpoint into a diffusers pipeline.

    Args:
        base_model_path: Path of the base StableDiffusion model (diffusers format).
        checkpoint_path: Path of the LoRA ``.safetensors`` checkpoint.
        lora_prefix_unet: Key prefix used for UNet weights in the checkpoint.
        lora_prefix_text_encoder: Key prefix used for text-encoder weights.
        alpha: Merging ratio in ``W = W0 + alpha * deltaW``.

    Returns:
        The pipeline with LoRA deltas merged into its weights in place.
    """
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer by walking attribute names, re-joining name
        # fragments with "_" when a lookup fails (layer names may contain "_")
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        # pair_keys[0] is always the "up" projection, pair_keys[1] the "down"
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight; conv layers (4-D kernels) need the spatial dims squeezed out
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list so the partner key is skipped on its own iteration
        for item in pair_keys:
            visited.append(item)
    return pipeline


# Backwards-compatible alias: the __main__ guard below calls this name.
convert = __lowerCAmelCase
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    # Merge the LoRA deltas, move to the requested device, then persist.
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 27
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a:
    """Builds tiny DeBERTa-v2 configs/inputs and checks each model head's output shapes.

    Used by the @require_torch test suite below via ``self.model_tester``.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids / masks / labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small DebertaVaConfig mirroring the tester's hyper-parameters."""
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        # Loss must be a scalar tensor.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model must yield (batch, seq, hidden) regardless of optional inputs."""
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each input along a new "choices" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backwards-compatible alias: the test suite below instantiates this name.
DebertaVaModelTester = a
@require_torch
class a(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin test suite for the DeBERTa-v2 model family."""

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class a(unittest.TestCase):
    """Slow integration tests against the microsoft/deberta-v2-xlarge checkpoint."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"""{output[:, 1:4, 1:4]}""")
| 347
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the BLIP-2 sub-package: modules are only imported
# when one of their attributes is first accessed.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase(pl.LightningModule):
    """Minimal Lightning wrapper matching the checkpoint layout: backbone + QA head.

    Only used as a container so the Lightning state_dict can be loaded and its
    weights copied out; no training/forward logic is needed.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        # QA head producing start/end logits, mirroring LongformerForQuestionAnswering.
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Conversion only needs the weights; no forward pass is implemented.
        pass
def lowerCAmelCase__(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path) -> str:
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a HF LongformerForQuestionAnswering.

    Args:
        longformer_model: Model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: Path of the Lightning checkpoint.
        pytorch_dump_folder_path: Output directory for the converted model.
    """
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = _UpperCAmelCase(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


# Backwards-compatible alias: the __main__ guard below calls this name.
convert_longformer_qa_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the MBart sub-package. Each optional backend
# (sentencepiece / tokenizers / torch / tf / flax) only registers its classes
# when the corresponding dependency is installed.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class UpperCAmelCase(unittest.TestCase):
    """Multi-node data-parallel SageMaker training smoke tests.

    Launches a HuggingFace estimator per parameterized config and asserts the
    finished job's runtime/accuracy/loss against the expected thresholds.
    """

    def setUp(self):
        # Copy the example training script into the SageMaker source dir.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: SMDataParallel unless the script handles DDP itself
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_99_99)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 376
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case :Union[str, Any] =logging.get_logger(__name__)
def lowerCamelCase_ ( videos ) -> List[List[ImageInput]]:
    """Normalize *videos* into a batch: a list of videos, each a list of frames.

    Accepts a single image, a flat list of frames, or a list of videos, and
    always returns the doubly-nested ``[[frame, ...], ...]`` form.

    Raises:
        ValueError: if the input matches none of the accepted shapes.
    """
    # BUGFIX: the original body read `_A` and `videos`, neither of which was
    # the (obfuscated) parameter name — every branch raised NameError.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # already a batch of videos
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # a single video given as a flat list of frames
        return [videos]
    elif is_valid_image(videos):
        # a single image/frame
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''')


# Backward/forward-compatible alias: the call site further down in this file
# refers to the function as `make_batched`.
make_batched = lowerCamelCase_
class lowerCAmelCase__ ( _UpperCAmelCase ):
    """Video image processor: resizes, center-crops, rescales and normalizes frames.

    NOTE(review): method and parameter names were reconstructed from the
    class's own call sites (`self.resize`, `self._preprocess_image`, ...); the
    obfuscated original named every method `__UpperCamelCase` and repeated
    that name for every parameter, which is a SyntaxError in Python.
    """

    # NOTE(review): `A_` looks like a mangled `model_input_names` — verify.
    A_ : List[str] = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        # a shortest-edge default must not be forced into a square dict
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to *size* (either a shortest edge or explicit H/W)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f'''Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}''')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have 'height' and 'width' as keys. Got {size.keys()}''')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # BUGFIX: parenthesized — the original `a and b is None or c is None`
        # raised whenever resample was None even with do_resize disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos into a ``BatchFeature`` of pixel values.

        Per-call arguments override the defaults configured in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # batch into [[frame, ...], ...]; `lowerCamelCase_` is the batching
        # helper defined above in this file
        videos = lowerCamelCase_(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 719
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """Builds small BeiT configs/inputs and checks model output shapes.

    NOTE(review): renamed from the obfuscated `lowerCAmelCase__` — the test
    class below instantiates `BeitModelTester(self)`, which grounds the name.
    """

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        # the original hard-codes 100 here regardless of the argument
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a config plus random pixel values and (optionally) labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # the [CLS] token is dropped for the MIM head, hence seq_length - 1
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common-suite tests for BeiT models.

    NOTE(review): class/attribute names were reconstructed — the obfuscated
    original used `_lowerCamelCase` bases (undefined) and named every class
    attribute `A_`, so only the last one survived. `ModelTesterMixin` and
    `PipelineTesterMixin` are imported at the top of this file and otherwise
    unused, which grounds the base classes.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original had three anonymous False/False flags; these
    # are the conventional mixin switches — verify against upstream.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='BEiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowerCamelCase_ ( ):
    """Load the standard COCO fixture image used by the integration tests.

    Returns a ``PIL.Image.Image`` (the original's ``-> Dict`` annotation was
    wrong, and it returned the undefined name ``image``).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


# Alias matching the name used at the call sites below.
prepare_img = lowerCamelCase_
@require_torch
@require_vision
class BeitModelIntegrationTest( unittest.TestCase ):
    """Slow integration tests that run real BeiT checkpoints on fixture images.

    NOTE(review): class, method and local names were reconstructed from their
    read sites; the obfuscated original assigned everything to ``A`` and named
    every test ``__UpperCamelCase``, so only the last method survived.
    """

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        # reference values differ slightly depending on the Pillow resampling
        # implementation (changed in Pillow 9)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 224
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure for the XLM-RoBERTa package; entries guarded by
# optional dependencies are added below only when those deps are installed.
# BUGFIX: the obfuscated original assigned every structure update (and the
# final _LazyModule) to a throwaway name `A`, so `_import_structure` was
# undefined and the lazy module was never installed.
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 475
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase__ ( Pipeline ):
    """Visual-question-answering pipeline: answers a free-text question about an image.

    NOTE(review): decorator argument, base class, method and parameter names
    were reconstructed — the obfuscated original referenced the undefined
    `__SCREAMING_SNAKE_CASE` for both (the otherwise-unused imports
    `PIPELINE_INIT_ARGS` and `Pipeline` ground the fix), named every hook
    `_UpperCAmelCase`, and repeated `_lowercase` as a parameter name (a
    SyntaxError).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # restrict to models registered for visual question answering
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split user kwargs into preprocess/forward/postprocess parameter dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer *question* about *image* (or accept pre-built payload dicts)."""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume a {"image": ..., "question": ...} payload (or a list of them)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and extract image features into model inputs."""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the top-k answers as ``{"score", "answer"}`` dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        # BUGFIX: the original read `config.idalabel`, a mangling of `id2label`
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 475
| 1
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API for self-hosted runners and fail if any target is offline.

    Writes the offline runners to `offline_runners.txt` (consumed by a Slack
    report step) and raises ValueError listing them when any are offline.
    """
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"""The following runners are offline:\n{failed}""")
if __name__ == "__main__":

    def list_str(values):
        """Parse a comma-separated CLI value into a list of runner names."""
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 629
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Public API of the package, resolved lazily (see _LazyModule below).
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling objects.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Defer all heavy imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629
| 1
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeated interval halving.

    Requires a sign change on [a, b]; otherwise raises ValueError.
    Iterates until the half-interval width drops below 1e-7.
    """
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    """Example polynomial x^3 - 2x - 5 (single real root near 2.0945)."""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    # Print the root of f on [1, 1000] found by bisection, then run doctests.
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 270
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# MT5 reuses the T5 tokenizers; fall back to the dummy objects (which raise an
# informative error on use) when the optional dependencies are missing.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    # The tokenizer aliases are eagerly constructed above, so they are passed
    # through `extra_objects` rather than via the lazy import structure.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MT5Tokenizer, 'MT5TokenizerFast': MT5TokenizerFast},
        module_spec=__spec__,
    )
| 3
| 0
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cuDNN kernels so the pipeline tests are reproducible.
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model, CPU) checks for `VideoToVideoSDPipeline`."""

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build the tiny UNet/scheduler/VAE/text-encoder stack shared by the fast tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),
            up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic 3-frame 32x32 video + prompt inputs for the pipeline."""
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test against the public zeroscope checkpoint."""

    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to('cuda')

        prompt = 'Spiderman is surfing'

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 106
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite specialization for BARThez (slow + fast tokenizers)."""

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Round-trip a known token/id pair through the vocab."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt'
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='moussaKam/mbarthez',
            revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6',
            sequences=sequences,
        )
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from model id to the location of its config file on the Hub.
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration for YOLOS models (ViT backbone + DETR-style detection head).

    Defaults mirror the base YOLOS architecture; extra kwargs are forwarded to
    `PretrainedConfig`.
    """

    model_type = 'yolos'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 591
|
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x2000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(10_00 - i, -10_00 - i, -1)) for i in range(10_00)]
grid = generate_large_matrix()
# Grids used by the tests/benchmark: mixed signs, all sorted decreasingly.
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Assert that every row and every column of `grid` is sorted decreasingly."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Return the index of the first negative value in a decreasingly sorted array.

    Equals the count of non-negative entries; uses binary search (O(log n)).
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negative entries with one binary search per row.

    The search bound only shrinks row to row because columns are also sorted
    decreasingly, so later rows cannot have more non-negative entries.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every entry (O(n*m) baseline)."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives, stopping each row scan at its first negative entry.

    Valid because each row is sorted decreasingly: everything after the first
    negative is also negative.
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Time the three counting strategies on the large module-level `grid`."""
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=5_00)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE: the benchmark iterates the 1000x2000 grid many times and runs for seconds.
    benchmark()
| 31
| 0
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity per Malus' law: I = I0 * cos^2(angle).

    `angle` is in degrees and must lie in [0, 360]; `initial_intensity` must be
    non-negative. Raises ValueError otherwise.
    """
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 3_60:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
    import doctest

    # Run the module's doctests under the display name "malus_law".
    doctest.testmod(name="malus_law")
| 713
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# Map from model id to the location of its config file on the Hub.
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    """Configuration for VAN (Visual Attention Network) models.

    Defaults mirror the van-base architecture; extra kwargs are forwarded to
    `PretrainedConfig`.
    """

    model_type = '''van'''

    def __init__(
        self,
        image_size=2_24,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 1_28, 3_20, 5_12],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 690
| 0
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram-stretch a grayscale image so its intensities use the full 0..L-1 range."""

    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []  # per-intensity remapped values
        self.rem = 0
        self.L = 256  # number of gray levels
        self.sk = 0  # running cumulative probability
        self.k = 0  # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Read `input_image` as grayscale, remap its intensities and write the result.

        Writes the stretched image to "output_data/output.jpg".
        """
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for last != 0 — presumably
                # a rounding step was intended here; confirm against the source algorithm.
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display input and output images side by side for 5 seconds."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): `os.path.basename(__file__)` yields the script's file *name*,
    # not its directory — `os.path.dirname` looks intended for locating
    # image_data/ next to the script; confirm before relying on this path.
    file_path = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 339
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds tiny XGLM configs/inputs shared by the TF model tests."""

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""")

    def prepare_config_and_inputs(self):
        # Small token ids (<= 3) keep the dummy inputs well inside the vocab.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-test harness wired up for XGLM."""

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class a_(unittest.TestCase):
    """Slow integration tests for TF-XGLM text generation.

    NOTE(review): locals were mangled to one repeated name while later lines
    referenced the real names (NameError), and all three methods shared one
    name so only the last survived; names restored from the upstream module.
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs: bool = True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        # Greedy decoding (no sampling) must reproduce the reference ids exactly.
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 644
| 0
|
from math import ceil
def _lowerCAmelCase ( __a , __a ) -> Tuple:
'''simple docstring'''
_UpperCamelCase :Dict =list(range(0 , __a ) )
_UpperCamelCase :int =[item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_UpperCamelCase :str =[]
for i in device_map_blocks:
if device_map_blocks.count(__a ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__a )
# Missing blocks
_UpperCamelCase :Optional[Any] =[i for i in blocks if i not in device_map_blocks]
_UpperCamelCase :Any =[i for i in device_map_blocks if i not in blocks]
if len(__a ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(__a ) )
if len(__a ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(__a ) )
if len(__a ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(__a ) )
def _lowerCAmelCase ( __a , __a ) -> str:
'''simple docstring'''
_UpperCamelCase :Dict =list(range(__a ) )
_UpperCamelCase :Union[str, Any] =int(ceil(n_layers / len(__a ) ) )
_UpperCamelCase :Dict =[layers[i : i + n_blocks] for i in range(0 , __a , __a )]
return dict(zip(__a , __a ) )
| 710
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences when flattening to characters; the name
# is referenced by the transforms below (it was mangled to `_lowerCamelCase`,
# which made `SENTENCE_DELIMITER` a NameError).
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Turn a list of sentences into one flat list of characters.

        Compatibility shim for jiwer < 2.3.0, which lacks
        ReduceToSingleSentence/ReduceToListOfListOfChars.
        """

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between (not after) sentences.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# Names restored: the metric class below is decorated with
# add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) and reads _CITATION,
# but all three constants had been mangled to `_lowerCamelCase`.
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase__(datasets.Metric):
    """Character Error Rate (CER) metric backed by jiwer."""

    def _info(self):
        # datasets.Metric dispatches to a hook named exactly `_info`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        # datasets.Metric dispatches to a hook named exactly `_compute`.
        # The original passed its own parameters as the jiwer transforms; the
        # character-level transforms live in the module-level `cer_transform`.
        if concatenate_texts:
            # With character transforms, jiwer's "wer" field *is* the CER.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 512
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder raising a clear error when torch/transformers/onnx are missing.

    The metaclass was mangled to an undefined name; `DummyObject` is imported
    above and otherwise unused. The original `*__snake_case, **__snake_case`
    parameter pairs shared one name (a SyntaxError).
    """

    snake_case_ = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    # NOTE(review): this classmethod shares its (mangled) name with the one
    # above and shadows it — kept as in the original.
    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder raising a clear error when torch/transformers/onnx are missing."""

    snake_case_ = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    # NOTE(review): shadows the classmethod above (same mangled name) — kept.
    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder raising a clear error when torch/transformers/onnx are missing."""

    snake_case_ = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    # NOTE(review): shadows the classmethod above (same mangled name) — kept.
    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder raising a clear error when torch/transformers/onnx are missing."""

    snake_case_ = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    # NOTE(review): shadows the classmethod above (same mangled name) — kept.
    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder raising a clear error when torch/transformers/onnx are missing."""

    snake_case_ = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    # NOTE(review): shadows the classmethod above (same mangled name) — kept.
    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCAmelCase(metaclass=DummyObject):
    """Import-time placeholder raising a clear error when torch/transformers/onnx are missing."""

    snake_case_ = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    # NOTE(review): shadows the classmethod above (same mangled name) — kept.
    @classmethod
    def lowerCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 369
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase(unittest.TestCase):
    """Integration tests for the Flax Stable Diffusion 2 pipeline.

    NOTE(review): local/method names restored from the mangled originals —
    the `a, b : T = ...` annotated tuple assignments were syntax errors, and
    the mangled `tearDown` name meant cleanup never ran between tests.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # Attach the scheduler's params under the key the pipeline reads.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 74
| 0
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels

# NOTE(review): the original line imported `SquadVaProcessor` twice (likely a
# mangling of the V1/V2 processors); the redundant duplicate is dropped.
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 94
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Module logger (renamed from the mangled `lowercase` to the conventional name).
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor]) -> Tuple:
'''simple docstring'''
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image):
__UpperCamelCase : int = [image]
if isinstance(image[0] , PIL.Image.Image):
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = image[0].size
__UpperCamelCase , __UpperCamelCase : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__UpperCamelCase : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__UpperCamelCase : Dict = np.concatenate(_lowerCamelCase , axis=0)
__UpperCamelCase : Any = np.array(_lowerCamelCase).astype(np.floataa) / 2_5_5.0
__UpperCamelCase : Union[str, Any] = image.transpose(0 , 3 , 1 , 2)
__UpperCamelCase : Optional[int] = 2.0 * image - 1.0
__UpperCamelCase : List[str] = torch.from_numpy(_lowerCamelCase)
elif isinstance(image[0] , torch.Tensor):
__UpperCamelCase : Dict = torch.cat(_lowerCamelCase , dim=0)
return image
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor]) -> Optional[int]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image):
__UpperCamelCase : Any = [mask]
if isinstance(mask[0] , PIL.Image.Image):
__UpperCamelCase , __UpperCamelCase : Optional[int] = mask[0].size
__UpperCamelCase , __UpperCamelCase : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__UpperCamelCase : List[str] = [np.array(m.convert("L").resize((w, h) , resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
__UpperCamelCase : Dict = np.concatenate(_lowerCamelCase , axis=0)
__UpperCamelCase : Any = mask.astype(np.floataa) / 2_5_5.0
__UpperCamelCase : List[str] = 0
__UpperCamelCase : Dict = 1
__UpperCamelCase : List[Any] = torch.from_numpy(_lowerCamelCase)
elif isinstance(mask[0] , torch.Tensor):
__UpperCamelCase : Optional[Any] = torch.cat(_lowerCamelCase , dim=0)
return mask
class lowerCamelCase__(DiffusionPipeline):
    """Pipeline for image inpainting with the RePaint scheduler.

    The base class was mangled to the undefined name `__lowercase`;
    `DiffusionPipeline` is imported above and otherwise unused. Local and
    parameter names (all mangled to `a`, a SyntaxError) restored from the
    upstream RePaint pipeline.
    """

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.'
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 94
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
# Must be named `logger`: the pipeline below calls `logger.warning(...)`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _A ( snake_case ):
    """Stable-Diffusion text-to-image pipeline with a seed-resize latent re-centering step.

    NOTE(review): identifiers in this block are machine-mangled — every local is
    assigned to the repeated name `snake_case` while later statements reference
    the intended names (`slice_size`, `batch_size`, `text_input_ids`,
    `latents`, ...), and all positional parameters share the name
    `SCREAMING_SNAKE_CASE_` (duplicate-argument SyntaxErrors). The annotated
    tuple assignments flagged below are also SyntaxErrors. Code is left
    byte-identical; restore names against the upstream source before use.
    """

    def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,):
        """Register the pipeline modules (vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor).

        NOTE(review): all seven parameters share one mangled name — SyntaxError.
        """
        super().__init__()
        self.register_modules(
            vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,unet=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,safety_checker=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ ,)

    def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ = "auto" ):
        """Enable sliced attention computation.

        NOTE(review): `slice_size` below is never bound (mangled parameter name).
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            snake_case : List[str] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )

    def snake_case_ ( self ):
        """Disable attention slicing.

        NOTE(review): shares the mangled name of the method above and shadows
        it; `SCREAMING_SNAKE_CASE_` is unbound in this scope.
        """
        self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )

    @torch.no_grad()
    def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 512 ,SCREAMING_SNAKE_CASE_ = 512 ,SCREAMING_SNAKE_CASE_ = 50 ,SCREAMING_SNAKE_CASE_ = 7.5 ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 1 ,SCREAMING_SNAKE_CASE_ = 0.0 ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = "pil" ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = 1 ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
        """Run text-conditioned denoising; presumably (prompt, height, width, num_inference_steps,
        guidance_scale, negative_prompt, num_images_per_prompt, eta, generator, latents,
        output_type, return_dict, callback, callback_steps, text_embeddings) — TODO confirm.
        """
        # --- prompt validation ---
        if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
            snake_case : List[Any] = 1
        elif isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
            snake_case : Any = len(SCREAMING_SNAKE_CASE_ )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(SCREAMING_SNAKE_CASE_ )}.""" )
        # get prompt text embeddings
        snake_case : Any = self.tokenizer(
            SCREAMING_SNAKE_CASE_ ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
        snake_case : int = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            snake_case : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            snake_case : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        # NOTE(review): annotated tuple assignment — SyntaxError in the original.
        snake_case , snake_case , snake_case : Optional[int] = text_embeddings.shape
        snake_case : Any = text_embeddings.repeat(1 ,SCREAMING_SNAKE_CASE_ ,1 )
        snake_case : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt ,SCREAMING_SNAKE_CASE_ ,-1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        snake_case : int = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            snake_case : List[str]
            if negative_prompt is None:
                snake_case : int = [""""""]
            elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !="""
                    F""" {type(SCREAMING_SNAKE_CASE_ )}.""" )
            elif isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
                snake_case : Dict = [negative_prompt]
            elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                snake_case : Optional[Any] = negative_prompt
            snake_case : List[str] = text_input_ids.shape[-1]
            snake_case : List[str] = self.tokenizer(
                SCREAMING_SNAKE_CASE_ ,padding="""max_length""" ,max_length=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" ,)
            snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            snake_case : Optional[Any] = uncond_embeddings.shape[1]
            snake_case : Union[str, Any] = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,1 )
            snake_case : Any = uncond_embeddings.view(batch_size * num_images_per_prompt ,SCREAMING_SNAKE_CASE_ ,-1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            snake_case : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        # NOTE(review): the second assignment immediately clobbers the first —
        # these were presumably `latents_shape` and `latents_shape_reference`.
        snake_case : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        snake_case : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        snake_case : Optional[int] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                snake_case : Optional[int] = torch.randn(
                    SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device="""cpu""" ,dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
                snake_case : Any = torch.randn(SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device="""cpu""" ,dtype=SCREAMING_SNAKE_CASE_ ).to(
                    self.device )
            else:
                snake_case : List[Any] = torch.randn(
                    SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device=self.device ,dtype=SCREAMING_SNAKE_CASE_ )
                snake_case : Optional[int] = torch.randn(SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device=self.device ,dtype=SCREAMING_SNAKE_CASE_ )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            snake_case : str = latents_reference.to(self.device )
            snake_case : Optional[int] = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        snake_case : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
        snake_case : List[Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
        snake_case : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        snake_case : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        snake_case : List[str] = 0 if dx < 0 else dx
        snake_case : Any = 0 if dy < 0 else dy
        snake_case : Dict = max(-dx ,0 )
        snake_case : Optional[Any] = max(-dy ,0 )
        # import pdb
        # pdb.set_trace()
        snake_case : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        snake_case : Union[str, Any] = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case : List[str] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        snake_case : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case : Optional[Any] = {}
        if accepts_eta:
            snake_case : int = eta
        for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
            # predict the noise residual
            snake_case : str = self.unet(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
            # perform guidance
            if do_classifier_free_guidance:
                # NOTE(review): annotated tuple assignment — SyntaxError in the original.
                snake_case , snake_case : str = noise_pred.chunk(2 )
                snake_case : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : int = self.scheduler.step(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        snake_case : Optional[Any] = 1 / 0.1_82_15 * latents
        snake_case : Any = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
        snake_case : str = (image / 2 + 0.5).clamp(0 ,1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        snake_case : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
        if self.safety_checker is not None:
            snake_case : Tuple = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) ,return_tensors="""pt""" ).to(
                self.device )
            # NOTE(review): annotated tuple assignment — SyntaxError in the original.
            snake_case , snake_case : Optional[Any] = self.safety_checker(
                images=SCREAMING_SNAKE_CASE_ ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            snake_case : List[str] = None
        if output_type == "pil":
            snake_case : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ ,nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 36
|
# First notebook cell injected by the doc builder: installs transformers.
# The original assigned all three constants to one repeated mangled name, so
# the reference to INSTALL_CONTENT below was a NameError.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholders that must not be reformatted by black in doc examples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 36
| 1
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _lowerCamelCase( a ):
    """Estimate pi by uniformly sampling ``a`` points in the square
    [-1, 1] x [-1, 1] and measuring the proportion that land inside the
    unit circle (that proportion converges to pi/4).

    :param a: number of random points to sample (name kept for
        backward compatibility; it is the iteration count).
    """

    def is_in_circle(x: float, y: float) -> bool:
        # Fixed: both parameters were named ``a`` (a SyntaxError) while the
        # body referenced ``x`` and ``y``.
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    # Fixed: the results below were bound to a throwaway local while the
    # following lines read ``proportion`` / ``pi_estimate``.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(a)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def _lowerCamelCase(iterations, function_to_integrate, min_value=0.0, max_value=1.0):
    """Monte-Carlo estimate of the integral of ``function_to_integrate``
    over ``[min_value, max_value]``: the mean of the function at uniformly
    sampled points, scaled by the interval width.

    Fixed: all four parameters were named ``a`` (a SyntaxError as written);
    the names are recovered from the body's references.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def _lowerCamelCase(iterations, min_value=0.0, max_value=1.0):
    """Sanity-check the Monte-Carlo integrator on f(x) = x, whose exact
    integral over [min_value, max_value] is (max^2 - min^2) / 2.

    Fixed: the three parameters were all named ``a`` (a SyntaxError as
    written); the inner function's parameter was also mangled while its body
    returned ``x``.
    """

    def identity_function(x) -> float:
        return x

    # NOTE(review): ``area_under_curve_estimator`` is not defined under that
    # name in this file (the integrator above was renamed) — confirm the
    # intended target before running.
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def _lowerCamelCase(a):
    """Estimate pi by integrating sqrt(4 - x^2) over [0, 2] (a quarter of a
    circle of radius 2, whose area is pi).

    :param a: number of Monte-Carlo samples (name kept for backward
        compatibility; it is the iteration count).
    """

    def function_to_integrate(x) -> float:
        # Fixed: the parameter was mangled while the body referenced ``x``.
        return sqrt(4.0 - x * x)

    # Fixed: the iteration count was passed twice — once where the integrand
    # callable belongs; pass the local ``function_to_integrate`` instead.
    # NOTE(review): ``area_under_curve_estimator`` is not defined under that
    # name in this file (the integrator above was renamed) — confirm.
    estimated_value = area_under_curve_estimator(
        a, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 709
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
    from rich.traceback import install

    # Install rich's pretty traceback handler process-wide.
    # ``show_locals=False`` keeps tracebacks compact (locals can be huge).
    install(show_locals=False)
else:
    # Fail fast at import time if the optional dependency is missing.
    raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
    """Builds a tiny MaskFormer configuration plus random inputs (pixel
    values, pixel mask, mask/class labels) for the model tests below.

    NOTE(review): identifiers in this block look machine-mangled — several
    ``def``s repeat the parameter name ``lowerCAmelCase_`` (a SyntaxError as
    written), and many assignments bind the same throwaway name
    ``__lowerCAmelCase`` while later lines read names (``self.batch_size``,
    ``pixel_values``, ``model``, ``output`` ...) that are consequently never
    bound. Restore the original identifiers before relying on this helper.
    """

    def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Dict=1_0 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Union[str, Any]=3_2 * 4 , lowerCAmelCase_ : int=3_2 * 6 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : int=3_2 , ) -> Union[str, Any]:
        # NOTE(review): duplicated parameter names; the defaults suggest
        # parent/batch_size/is_training/use_auxiliary_loss/num_queries/
        # num_channels/min_size/max_size/num_labels/mask_feature_size.
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_auxiliary_loss
        __lowerCAmelCase = num_queries
        __lowerCAmelCase = num_channels
        __lowerCAmelCase = min_size
        __lowerCAmelCase = max_size
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = mask_feature_size

    def lowercase ( self : Dict ) -> Any:
        # Build random pixel values / mask / labels shaped by the tester's
        # settings, on the target device.
        __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            lowerCAmelCase_ )
        __lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase_ )
        __lowerCAmelCase = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase_ ) > 0.5
        ).float()
        __lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase_ ) > 0.5).long()
        __lowerCAmelCase = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase ( self : str ) -> List[Any]:
        # Tiny Swin backbone + DETR decoder config sized by this tester.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def lowercase ( self : Optional[Any] ) -> List[str]:
        # Common-test variant: only config + the dict of model inputs.
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
        __lowerCAmelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> Tuple:
        # NOTE(review): duplicated parameter names — SyntaxError as written.
        __lowerCAmelCase = output.encoder_hidden_states
        __lowerCAmelCase = output.pixel_decoder_hidden_states
        __lowerCAmelCase = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(lowerCAmelCase_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(lowerCAmelCase_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(lowerCAmelCase_ ) , config.decoder_config.decoder_layers )

    def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]=False ) -> Tuple:
        # NOTE(review): duplicated parameter names — SyntaxError as written.
        with torch.no_grad():
            __lowerCAmelCase = MaskFormerModel(config=lowerCAmelCase_ )
            model.to(lowerCAmelCase_ )
            model.eval()
            __lowerCAmelCase = model(pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ )
            __lowerCAmelCase = model(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(lowerCAmelCase_ , lowerCAmelCase_ )

    def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ) -> Optional[Any]:
        # NOTE(review): duplicated parameter names — SyntaxError as written.
        __lowerCAmelCase = MaskFormerForInstanceSegmentation(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()

        def comm_check_on_output(lowerCAmelCase_ : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            __lowerCAmelCase = model(pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ )
            __lowerCAmelCase = model(lowerCAmelCase_ )
        comm_check_on_output(lowerCAmelCase_ )
        __lowerCAmelCase = model(
            pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ )
        comm_check_on_output(lowerCAmelCase_ )
        # With labels supplied the segmentation head must return a scalar loss.
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common model/pipeline test suite for MaskFormer.

    NOTE(review): identifiers look machine-mangled — the base mixins are the
    undefined name ``_UpperCamelCase`` (presumably ModelTesterMixin and
    PipelineTesterMixin), every class attribute is bound to the same name
    ``a_`` (each shadowing the previous), every method is named ``lowercase``
    (each redefinition shadows the previous), and assignments bind a
    throwaway ``__lowerCAmelCase`` while later lines read ``model``,
    ``signature``, ``outputs``, ``loss`` etc. Restore the original
    identifiers before relying on this suite.
    """

    a_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    a_ = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    a_ = False
    a_ = False
    a_ = False
    a_ = False

    def lowercase ( self : Tuple ) -> List[str]:
        # NOTE(review): ``MaskFormerModelTester`` is not defined under that
        # name in this file (the tester class above was renamed) — confirm.
        __lowerCAmelCase = MaskFormerModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )

    def lowercase ( self : Dict ) -> Optional[int]:
        self.config_tester.run_common_tests()

    def lowercase ( self : Optional[int] ) -> Tuple:
        __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(lowerCAmelCase_ , **lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )

    def lowercase ( self : List[str] ) -> Tuple:
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase_ )

    @unittest.skip(reason='MaskFormer does not use inputs_embeds' )
    def lowercase ( self : Union[str, Any] ) -> int:
        pass

    @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
    def lowercase ( self : Tuple ) -> Union[str, Any]:
        pass

    @unittest.skip(reason='MaskFormer is not a generative model' )
    def lowercase ( self : Any ) -> Optional[Any]:
        pass

    @unittest.skip(reason='MaskFormer does not use token embeddings' )
    def lowercase ( self : Union[str, Any] ) -> str:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def lowercase ( self : str ) -> Any:
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowercase ( self : List[Any] ) -> Any:
        pass

    def lowercase ( self : Dict ) -> int:
        # forward() must take pixel_values as its first argument.
        __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCAmelCase = model_class(lowerCAmelCase_ )
            __lowerCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCAmelCase = [*signature.parameters.keys()]
            __lowerCAmelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )

    @slow
    def lowercase ( self : Any ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            __lowerCAmelCase = MaskFormerModel.from_pretrained(lowerCAmelCase_ )
            self.assertIsNotNone(lowerCAmelCase_ )

    def lowercase ( self : str ) -> Optional[int]:
        # A default-config model with labels must produce a loss.
        __lowerCAmelCase = (self.model_tester.min_size,) * 2
        __lowerCAmelCase = {
            'pixel_values': torch.randn((2, 3, *size) , device=lowerCAmelCase_ ),
            'mask_labels': torch.randn((2, 1_0, *size) , device=lowerCAmelCase_ ),
            'class_labels': torch.zeros(2 , 1_0 , device=lowerCAmelCase_ ).long(),
        }
        __lowerCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase_ )
        __lowerCAmelCase = model(**lowerCAmelCase_ )
        self.assertTrue(outputs.loss is not None )

    def lowercase ( self : str ) -> Optional[int]:
        __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(lowerCAmelCase_ , **lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )

    def lowercase ( self : Optional[int] ) -> Dict:
        # Attention maps must be returned when requested.
        __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCAmelCase = model_class(lowerCAmelCase_ ).to(lowerCAmelCase_ )
            __lowerCAmelCase = model(**lowerCAmelCase_ , output_attentions=lowerCAmelCase_ )
            self.assertTrue(outputs.attentions is not None )

    def lowercase ( self : str ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        __lowerCAmelCase = self.all_model_classes[1]
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        __lowerCAmelCase = model_class(lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.train()
        __lowerCAmelCase = model(lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ).loss
        loss.backward()

    def lowercase ( self : str ) -> Union[str, Any]:
        # only MaskFormerForInstanceSegmentation has the loss
        __lowerCAmelCase = self.all_model_classes[1]
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        __lowerCAmelCase = True
        __lowerCAmelCase = True
        __lowerCAmelCase = model_class(lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.train()
        __lowerCAmelCase = model(lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ )
        __lowerCAmelCase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        __lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        __lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        __lowerCAmelCase = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=lowerCAmelCase_ )
        # Gradients must flow back to every intermediate representation.
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the integration tests when comparing tensors
# (presumably the original name was TOLERANCE — the mangled name is kept).
_snake_case : Any = 1e-4


def a_( ):
    """Load the standard COCO cats fixture image used by the tests below.

    NOTE(review): the opened image is bound to a throwaway name while the
    function returns the undefined name ``image`` — looks machine-mangled.
    """
    __lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests: run real MaskFormer checkpoints on a COCO
    image and compare hidden states / logits against recorded values.

    NOTE(review): identifiers look machine-mangled — assignments bind the
    same throwaway name ``__lowerCAmelCase`` while later lines read
    ``model``, ``image_processor``, ``inputs``, ``inputs_shape``,
    ``outputs`` etc. that are never bound, and most positional arguments
    were replaced by ``lowerCAmelCase_``. The ``atol`` argument is
    presumably the module-level 1e-4 tolerance constant. Restore the
    original identifiers before relying on these tests.
    """

    @cached_property
    def lowercase ( self : Dict ) -> Optional[Any]:
        # Image processor matching the small-coco checkpoint (None w/o vision).
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
            if is_vision_available()
            else None
        )

    def lowercase ( self : Optional[Any] ) -> Tuple:
        # Backbone-only checkpoint: check processed input size and the first
        # 3x3 corner of each of the three last-hidden-state tensors.
        __lowerCAmelCase = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(lowerCAmelCase_ )
        __lowerCAmelCase = self.default_image_processor
        __lowerCAmelCase = prepare_img()
        __lowerCAmelCase = image_processor(lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
        __lowerCAmelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(lowerCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            __lowerCAmelCase = model(**lowerCAmelCase_ )
        __lowerCAmelCase = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(lowerCAmelCase_ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
        __lowerCAmelCase = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(lowerCAmelCase_ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
        __lowerCAmelCase = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(lowerCAmelCase_ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )

    def lowercase ( self : List[str] ) -> Optional[Any]:
        # Segmentation head checkpoint: check mask and class logits against
        # recorded reference slices.
        __lowerCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
            .to(lowerCAmelCase_ )
            .eval()
        )
        __lowerCAmelCase = self.default_image_processor
        __lowerCAmelCase = prepare_img()
        __lowerCAmelCase = image_processor(lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
        __lowerCAmelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(lowerCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            __lowerCAmelCase = model(**lowerCAmelCase_ )
        # masks_queries_logits
        __lowerCAmelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        __lowerCAmelCase = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        __lowerCAmelCase = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
        # class_queries_logits
        __lowerCAmelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        __lowerCAmelCase = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ] ).to(lowerCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )

    def lowercase ( self : Optional[int] ) -> str:
        # Same checks for the ResNet-101 COCO-stuff checkpoint.
        __lowerCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
            .to(lowerCAmelCase_ )
            .eval()
        )
        __lowerCAmelCase = self.default_image_processor
        __lowerCAmelCase = prepare_img()
        __lowerCAmelCase = image_processor(lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
        __lowerCAmelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(lowerCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            __lowerCAmelCase = model(**lowerCAmelCase_ )
        # masks_queries_logits
        __lowerCAmelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        __lowerCAmelCase = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        __lowerCAmelCase = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )
        # class_queries_logits
        __lowerCAmelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        __lowerCAmelCase = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(lowerCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) )

    def lowercase ( self : Dict ) -> Any:
        # Batched inputs with segmentation maps must still yield a loss.
        __lowerCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
            .to(lowerCAmelCase_ )
            .eval()
        )
        __lowerCAmelCase = self.default_image_processor
        # NOTE(review): ``np.floataa`` is presumably ``np.float32`` — confirm.
        __lowerCAmelCase = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='pt' , )
        __lowerCAmelCase = inputs['pixel_values'].to(lowerCAmelCase_ )
        __lowerCAmelCase = [el.to(lowerCAmelCase_ ) for el in inputs['mask_labels']]
        __lowerCAmelCase = [el.to(lowerCAmelCase_ ) for el in inputs['class_labels']]
        with torch.no_grad():
            __lowerCAmelCase = model(**lowerCAmelCase_ )
        self.assertTrue(outputs.loss is not None )
| 53
|
import argparse
import os
import re
import packaging.version
# Fixed: these four constants were all bound to the same name
# ``lowerCamelCase`` (each shadowing the previous) while the functions below
# reference ``REPLACE_PATTERNS`` / ``REPLACE_FILES``, which were otherwise
# undefined.

# Root folder whose example scripts get their pinned version updated.
PATH_TO_EXAMPLES = '''examples/'''

# For each kind of file, a (compiled pattern, replacement template) pair;
# "VERSION" in the template is substituted with the actual version string.
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}

# Files whose version strings are rewritten on every release.
REPLACE_FILES = {
    '''init''': '''src/diffusers/__init__.py''',
    '''setup''': '''setup.py''',
}

README_FILE = '''README.md'''
def snake_case_( fname , version , pattern ):
    """Rewrite the pinned version string inside ``fname``.

    Fixed: the three parameters were all named ``lowerCAmelCase_`` (a
    SyntaxError as written); the names are recovered from the body's usage.

    :param fname: path of the file to rewrite in place.
    :param version: the version string substituted for "VERSION".
    :param pattern: key into ``REPLACE_PATTERNS`` selecting the file kind.
    """
    with open(fname , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Instantiate the replacement template with the concrete version.
    replace = replace.replace("""VERSION""" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.write(code )
def snake_case_( lowerCAmelCase_ : Tuple ):
    """Walk the examples tree and pin the version in every ``*.py`` file.

    NOTE(review): identifiers look machine-mangled — the single parameter is
    used both as the directory to walk and as every argument forwarded to
    ``update_version_in_file`` (which is not defined under that name in this
    file). Presumably the walk root should be the examples-path constant and
    the parameter is the version string — confirm against the original
    release script before running.
    """
    for folder, directories, fnames in os.walk(lowerCAmelCase_ ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , pattern="""examples""" )
def snake_case_( version , patch=False ):
    """Update ``version`` everywhere it is pinned (init, setup, examples).

    Fixed: both parameters were named ``lowerCAmelCase_`` (a SyntaxError as
    written); the names are recovered from the body's intent.

    :param version: version string to write.
    :param patch: when True, skip the examples (patch releases only touch
        the library files).
    """
    # NOTE(review): ``update_version_in_file`` / ``update_version_in_examples``
    # are not defined under those names in this file (the helpers above were
    # renamed) — confirm before running.
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def snake_case_( ):
    """Point the model list in the README at stable doc links instead of main.

    Fixed: the prompt strings, the file handle target and the read lines were
    bound to throwaway locals while the loops below read ``_start_prompt`` /
    ``_end_prompt`` / ``lines``; the result of ``str.replace`` was discarded;
    and ``writelines`` was given an undefined name. ``README_FILE`` follows
    the original release script — confirm the constant exists at module level.
    """
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
        index += 1
    with open(README_FILE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
def snake_case_( ):
    """Return the current package version, parsed from the ``__version__``
    line of the init file, as a ``packaging`` Version object.

    Fixed: the file contents and the matched version were bound to throwaway
    locals while the following lines read an undefined name (NameError as
    written).
    """
    with open(REPLACE_FILES["""init"""] , """r""" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def snake_case_( lowerCAmelCase_ : Any=False ):
    """Interactive pre-release step: compute the release version, confirm it
    with the user, then pin it across the repo.

    NOTE(review): identifiers look machine-mangled — the body reads
    ``patch``, ``default_version`` and ``version`` and calls ``get_version``
    / ``global_version_update``, none of which are defined under those names
    in this file, while assignments bind a throwaway ``__lowercase`` local.
    Restore the original names before running.
    """
    __lowercase : int = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        # Releasing from a dev version: drop the .dev suffix.
        __lowercase : Dict = default_version.base_version
    elif patch:
        __lowercase : Tuple = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        __lowercase : Optional[int] = F"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    __lowercase : Optional[int] = input(F"Which version are you releasing? [{default_version}]" )
    if len(lowerCAmelCase_ ) == 0:
        __lowercase : Union[str, Any] = default_version
    print(F"Updating version to {version}." )
    global_version_update(lowerCAmelCase_ , patch=lowerCAmelCase_ )
def snake_case_( ):
    """Post-release step: bump the repo to the next ``.dev0`` version after
    confirming it with the user.

    NOTE(review): same mangling as above — the body reads
    ``current_version``, ``dev_version`` and ``version`` and calls
    ``get_version`` / ``global_version_update``, which are not defined under
    those names here; assignments bind a throwaway ``__lowercase`` local.
    """
    __lowercase : List[Any] = get_version()
    __lowercase : Optional[Any] = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
    __lowercase : Tuple = current_version.base_version
    # Check with the user we got that right.
    __lowercase : Optional[Any] = input(F"Which version are we developing now? [{dev_version}]" )
    if len(lowerCAmelCase_ ) == 0:
        __lowercase : List[str] = dev_version
    print(F"Updating version to {version}." )
    global_version_update(lowerCAmelCase_ )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fixed: the ArgumentParser and the parsed namespace were bound to a
    # throwaway name while the lines below read ``parser`` / ``args``
    # (NameError as written).
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    # NOTE(review): ``pre_release_work`` / ``post_release_work`` are not
    # defined under those names in this file (the functions above were
    # renamed) — confirm before running.
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 149
| 0
|
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCamelCase ( unittest.TestCase ):
    """Unit tests for the greedy knapsack helper ``kp.calc_profit``.

    NOTE(review): the error-path tests look machine-mangled — every
    ``assertRaisesRegex`` call passes the undefined name ``__snake_case``
    where an exception type belongs and supplies no callable/arguments, so
    none of them can actually exercise ``calc_profit``. Presumably each
    should be ``self.assertRaisesRegex(ValueError, <msg>, kp.calc_profit,
    profit, weight, max_weight)`` with suitably invalid inputs — restore
    before trusting this suite.
    """

    def A__ ( self ) -> List[str]:
        """Happy path: expected maximum profit for this profit/weight table."""
        UpperCAmelCase: List[str] = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
        UpperCAmelCase: Optional[Any] = [2, 4, 6, 8, 1_0, 1_2]
        UpperCAmelCase: str = 1_0_0
        self.assertEqual(kp.calc_profit(__snake_case , __snake_case , __snake_case ) , 2_1_0 )

    def A__ ( self ) -> List[Any]:
        """Zero/negative capacity must be rejected."""
        self.assertRaisesRegex(__snake_case , "max_weight must greater than zero." )

    def A__ ( self ) -> Tuple:
        """Negative weights must be rejected."""
        self.assertRaisesRegex(__snake_case , "Weight can not be negative." )

    def A__ ( self ) -> Union[str, Any]:
        """Negative profits must be rejected."""
        self.assertRaisesRegex(__snake_case , "Profit can not be negative." )

    def A__ ( self ) -> str:
        """Null capacity must be rejected."""
        self.assertRaisesRegex(__snake_case , "max_weight must greater than zero." )

    def A__ ( self ) -> int:
        """Mismatched profit/weight list lengths must be rejected."""
        self.assertRaisesRegex(
            __snake_case , "The length of profit and weight must be same." )
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| 166
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger (mangled name; presumably ``logger`` — it is read as
# ``logger`` inside the class below).
snake_case_ : Any = logging.get_logger(__name__)
class __lowerCamelCase ( lowercase ):
    """Feature extractor that validates, normalises and pads/truncates raw
    audio into model-ready ``input_values`` (EnCodec-style chunked padding).

    NOTE(review): this block looks machine-mangled. The base-class name
    ``lowercase`` is undefined (presumably ``SequenceFeatureExtractor``);
    ``__init__`` and ``__call__`` repeat the parameter name ``__snake_case``
    (a SyntaxError as written); and many assignments bind the same throwaway
    ``UpperCAmelCase`` local while later lines read ``self.chunk_length_s``,
    ``self.overlap``, ``raw_audio``, ``max_length``, ``padded_inputs``,
    ``input_values`` etc. that are consequently never (re)bound.
    ``np.floataa`` is presumably ``np.float32``. Restore the original
    identifiers before relying on this class.
    """

    # Names of the tensors this extractor produces.
    lowerCamelCase__: Optional[int] = ['''input_values''', '''padding_mask''']

    def __init__( self , __snake_case = 1 , __snake_case = 2_4_0_0_0 , __snake_case = 0.0 , __snake_case = None , __snake_case = None , **__snake_case , ) -> Dict:
        """Store feature size / sampling rate / chunking settings.

        NOTE(review): duplicated parameter names — SyntaxError as written;
        the base ``__init__`` call suggests feature_size/sampling_rate/
        padding_value/chunk_length_s/overlap.
        """
        super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
        UpperCAmelCase: Any = chunk_length_s
        UpperCAmelCase: Optional[int] = overlap

    @property
    def A__ ( self ) -> Optional[int]:
        """Chunk length in samples, or None when chunking is not configured."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def A__ ( self ) -> Optional[int]:
        """Stride between chunks in samples, derived from the overlap ratio."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            # At least one sample of stride, even for very large overlaps.
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    def __call__( self , __snake_case , __snake_case = None , __snake_case = False , __snake_case = None , __snake_case = None , __snake_case = None , ) -> BatchFeature:
        """Validate, normalise and pad/truncate raw audio; return a BatchFeature.

        NOTE(review): duplicated parameter names — SyntaxError as written;
        the body suggests raw_audio/padding/truncation/max_length/
        return_tensors/sampling_rate.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    F' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one." )
        elif padding is None:
            # by default let's pad the inputs
            UpperCAmelCase: Optional[Any] = True
        # Batched input = a list/tuple whose first element is itself array-like.
        UpperCAmelCase: List[str] = bool(
            isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            UpperCAmelCase: int = [np.asarray(__snake_case , dtype=np.floataa ).T for audio in raw_audio]
        elif not is_batched and not isinstance(__snake_case , np.ndarray ):
            UpperCAmelCase: Dict = np.asarray(__snake_case , dtype=np.floataa )
        elif isinstance(__snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
            UpperCAmelCase: Optional[Any] = raw_audio.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase: Any = [np.asarray(__snake_case ).T]
        # verify inputs are valid
        for idx, example in enumerate(__snake_case ):
            if example.ndim > 2:
                raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
        UpperCAmelCase: Tuple = None
        UpperCAmelCase: str = BatchFeature({"input_values": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate every example to the largest whole number of chunks
                # that fits in the shortest example.
                UpperCAmelCase: Any = min(array.shape[0] for array in raw_audio )
                UpperCAmelCase: List[Any] = int(np.floor(max_length / self.chunk_stride ) )
                UpperCAmelCase: List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad every example up to a whole number of chunks covering
                # the longest example.
                UpperCAmelCase: Optional[Any] = max(array.shape[0] for array in raw_audio )
                UpperCAmelCase: Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) )
                UpperCAmelCase: List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
                UpperCAmelCase: int = "max_length"
            else:
                UpperCAmelCase: Optional[Any] = input_values
        # normal padding on batch
        if padded_inputs is None:
            UpperCAmelCase: Union[str, Any] = self.pad(
                __snake_case , max_length=__snake_case , truncation=__snake_case , padding=__snake_case , return_attention_mask=__snake_case , )
            if padding:
                UpperCAmelCase: str = padded_inputs.pop("attention_mask" )
        UpperCAmelCase: Tuple = []
        for example in padded_inputs.pop("input_values" ):
            if self.feature_size == 1:
                # Mono audio gets an explicit trailing channel dimension.
                UpperCAmelCase: Optional[int] = example[..., None]
            input_values.append(example.T )
        UpperCAmelCase: str = input_values
        if return_tensors is not None:
            UpperCAmelCase: Optional[int] = padded_inputs.convert_to_tensors(__snake_case )
        return padded_inputs
| 166
| 1
|
import logging
from transformers import PretrainedConfig
# NOTE(review): both module-level constants are bound to the same name
# ``snake_case__`` — the logger is immediately shadowed by the
# pretrained-config map (names look machine-mangled; presumably ``logger``
# and ``BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP``).
snake_case__ = logging.getLogger(__name__)
snake_case__ = {
    """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class UpperCAmelCase ( __lowerCamelCase ):
    """Configuration for the BertAbs extractive/abstractive summarizer:
    vocabulary size plus encoder/decoder layer counts, hidden sizes, head
    counts, feed-forward sizes and dropouts.

    Fixed: ``__init__`` repeated the parameter name ``lowerCAmelCase`` for
    every argument (a SyntaxError as written) and bound each value to the
    same throwaway local instead of an instance attribute, so the config
    stored nothing. The attribute names are recovered from the original
    assignment list.
    """

    a__: Union[str, Any] = """bertabs"""

    def __init__(
        self,
        vocab_size=3_0522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        # Forward any extra config options to the base config class.
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 583
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny model/optimizer/scheduler/dataloader fixture for the tests.

    Returns:
        (model, optimizer, scheduler, train_dataloader, valid_dataloader)
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    # BUG FIX: the scheduler must wrap the optimizer it schedules (the original
    # passed an undefined name here).  Renamed from the mangled `lowerCamelCase_`
    # to `create_components`, the name the test methods below actually call.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    """Return a scalar fingerprint of a linear model's parameters.

    Computed as sum(|weight|) + sum(|bias|); used to detect whether a
    checkpoint round-trip restored the exact weights.  Renamed from the
    mangled `lowerCamelCase_` to the name the test methods call.
    """
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    """Overwrite *model*'s parameters in place with a freshly initialised
    `torch.nn.Linear` of the same shape (used to scramble weights before a
    load_state round-trip).  Renamed from the mangled `lowerCamelCase_` to the
    name the test methods call.
    """
    # weight is (out, in); Linear takes (in, out), hence the transpose.
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
# NOTE(review): auto-mangled test-suite for `Accelerator`.  Indentation has been
# flattened, every test method shares the name `_lowerCAmelCase` (later defs
# clobber earlier ones), and call sites reference names (`create_components`,
# `get_signature`, `load_random_weights`, the `lowerCAmelCase` value
# placeholders) that are not bound under these spellings — presumably artifacts
# of identifier mangling; the code cannot run as-is.  Code left byte-identical;
# comments only.
class UpperCAmelCase ( __lowerCamelCase ):
# Test: constructing an Accelerator on CUDA populates PartialState, and a
# second, conflicting construction (cpu=...) must raise.
@require_cuda
def _lowerCAmelCase ( self : Optional[Any] ):
lowercase : List[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCAmelCase ):
lowercase : Optional[Any] = Accelerator(cpu=lowerCAmelCase )
# Test: GradientState is a shared singleton whose num_steps / sync_gradients
# mutate globally; it is reset afterwards to avoid cross-test leakage.
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : int = Accelerator()
lowercase : Union[str, Any] = GradientState()
assert state.num_steps == 1
lowercase : Tuple = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowercase : Tuple = False
assert state.sync_gradients is False
GradientState._reset_state()
# Test: every object returned by `prepare` is registered on the accelerator.
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : List[str] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = create_components()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
# Test: free_memory() drops every registered model/optimizer/scheduler/dataloader.
def _lowerCAmelCase ( self : int ):
lowercase : Optional[Any] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] = create_components()
accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
# Test: the ACCELERATE_TORCH_DEVICE env var selects the device
# (torch.cuda.set_device is mocked because `cuda:64` does not exist).
def _lowerCAmelCase ( self : Dict ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCAmelCase : Any , **lowerCAmelCase : Optional[int] ):
pass
with patch('''torch.cuda.set_device''' , lowerCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
lowercase : Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
# Test: save_state/load_state round-trips model weights (compared via the
# scalar weight-signature fingerprint).
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : Tuple = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : int = create_components()
accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
lowercase : List[Any] = get_signature(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase )
# make sure random weights don't match
load_random_weights(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) < 1E-3 )
# Test: registered save/load pre-hooks fire during save_state/load_state and
# stop firing after .remove().
def _lowerCAmelCase ( self : Optional[Any] ):
lowercase : int = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : List[str] = create_components()
accelerator.prepare(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
lowercase : Tuple = get_signature(lowerCAmelCase )
# saving hook
def save_config(lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
lowercase : Optional[int] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(lowerCAmelCase , '''data.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
# loading hook
def load_config(lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
with open(os.path.join(lowerCAmelCase , '''data.json''' ) , '''r''' ) as f:
lowercase : Dict = json.load(lowerCAmelCase )
lowercase : Union[str, Any] = config['''class_name''']
lowercase : str = accelerator.register_save_state_pre_hook(lowerCAmelCase )
lowercase : Tuple = accelerator.register_load_state_pre_hook(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowercase : List[Any] = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowercase : Any = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
# Test: `prepare` passes a None extra object through untouched.
def _lowerCAmelCase ( self : Union[str, Any] ):
lowercase : List[Any] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = create_components()
lowercase : int = None
# This should work
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : int = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertTrue(dummy_obj is None )
# Test: every prepared object (including a plain list) is tagged with the
# `_is_accelerate_prepared` marker attribute.
def _lowerCAmelCase ( self : Dict ):
lowercase : str = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase : List[str] = create_components()
lowercase : List[str] = [1, 2, 3]
# This should work
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Union[str, Any] = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(lowerCAmelCase , '''_is_accelerate_prepared''' , lowerCAmelCase ) , lowerCAmelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
# Test: a bitsandbytes 8-bit-quantized model on one GPU can be prepared.
# NOTE(review): `load_in_abit` looks like a digit-mangled `load_in_8bit` — confirm.
@slow
@require_bnb
def _lowerCAmelCase ( self : Optional[int] ):
from transformers import AutoModelForCausalLM
lowercase : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=lowerCAmelCase , device_map={'''''': 0} , )
lowercase : List[str] = Accelerator()
# This should work
lowercase : Any = accelerator.prepare(lowerCAmelCase )
# Test: preparing an 8-bit model that is partially offloaded to CPU must raise.
@slow
@require_bnb
def _lowerCAmelCase ( self : Optional[Any] ):
from transformers import AutoModelForCausalLM
lowercase : Optional[int] = Accelerator()
with init_empty_weights():
lowercase : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowercase : Optional[Any] = infer_auto_device_map(lowerCAmelCase )
lowercase : str = '''cpu'''
lowercase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=lowerCAmelCase , load_in_abit=lowerCAmelCase , llm_inta_enable_fpaa_cpu_offload=lowerCAmelCase )
# This should not work and get value error
with self.assertRaises(lowerCAmelCase ):
lowercase : Optional[Any] = accelerator.prepare(lowerCAmelCase )
# Test: an 8-bit model spread over several GPUs must be rejected under MULTI_GPU.
@slow
@require_bnb
@require_multi_gpu
def _lowerCAmelCase ( self : Union[str, Any] ):
from transformers import AutoModelForCausalLM
lowercase : Tuple = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowercase : int = infer_auto_device_map(lowerCAmelCase )
lowercase : Tuple = 1
lowercase : Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=lowerCAmelCase , device_map=lowerCAmelCase , )
lowercase : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(lowerCAmelCase ):
lowercase : str = accelerator.prepare(lowerCAmelCase )
PartialState._reset_state()
# Test: the same multi-GPU 8-bit model prepares fine outside MULTI_GPU mode.
@slow
@require_bnb
@require_multi_gpu
def _lowerCAmelCase ( self : str ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowercase : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
lowercase : Any = infer_auto_device_map(lowerCAmelCase )
lowercase : Any = 1
lowercase : int = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=lowerCAmelCase , device_map=lowerCAmelCase , )
lowercase : List[str] = Accelerator()
# This should work
lowercase : List[Any] = accelerator.prepare(lowerCAmelCase )
# Test: Accelerator(cpu=...) still prepares a plain model/optimizer pair —
# NOTE(review): the intended placeholder value (presumably cpu=True) is lost
# in the mangling; confirm against the upstream test-suite.
@require_cuda
def _lowerCAmelCase ( self : str ):
lowercase : int = torch.nn.Linear(10 , 10 )
lowercase : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01 )
lowercase : Union[str, Any] = Accelerator(cpu=lowerCAmelCase )
lowercase : Union[str, Any] = accelerator.prepare(lowerCAmelCase )
| 583
| 1
|
'''simple docstring'''
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 454
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

# BUG FIX: the original assigned all four module-level constants to the same
# mangled name `lowerCamelCase`, clobbering each other, while the tokenizer
# class below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class _UpperCamelCase(A):
    """Fast BART tokenizer (byte-level BPE, backed by the `tokenizers` library).

    BUG FIXES vs. the mangled original: the `@mask_token.setter` referenced a
    property that was never defined under that name (NameError at class
    creation), every method was named `__lowerCamelCase` (clobbering), and the
    `__init__` declared thirteen identically-named parameters.  The real names
    are restored from the internal evidence (`super()._batch_encode_plus`,
    `super()._encode_plus`, the setter decorator, the base-class contract).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if the requested add_prefix_space differs
        # from what the serialized backend tokenizer was saved with.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with a logged error) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Wrap plain strings in an AddedToken that eats the preceding space so
        # the mask behaves like a normal word.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pre-tokenized input unless add_prefix_space was enabled."""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pre-tokenized input unless add_prefix_space was enabled."""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap sequences with BOS/EOS: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids — return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 454
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
# NOTE(review): auto-mangled fast test-suite for StableDiffusionXLImgaImgPipeline.
# Indentation is flattened, every method shares the name `lowerCAmelCase__`
# (later defs clobber earlier ones), and signatures declare `a__` while bodies
# read `a_` — presumably both are mangled spellings of the same parameter; the
# code cannot run as-is.  Code left byte-identical; comments/docstrings only.
class _snake_case ( A__ , A__ , unittest.TestCase ):
# Pipeline-under-test and the standard pipeline-test parameter sets.
lowerCAmelCase_ : Optional[int] = StableDiffusionXLImgaImgPipeline
lowerCAmelCase_ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase_ : Any = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCAmelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ) -> Any:
'''Build a dict of tiny, seeded SDXL components (unet, scheduler, vae, two text encoders + tokenizers) for fast CPU tests.'''
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=a_ , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
snake_case_ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=32 , )
snake_case_ = CLIPTextModel(a_ )
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a_ )
snake_case_ = CLIPTextModelWithProjection(a_ )
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a_ )
snake_case_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
# Presumably get_dummy_inputs(device, seed=0) — TODO confirm intended names.
def lowerCAmelCase__ ( self , a__ , a__=0 ) -> str:
'''Return a deterministic img2img input dict (seeded image, generator, prompt).'''
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
snake_case_ = image / 2 + 0.5
if str(a_ ).startswith("mps" ):
snake_case_ = torch.manual_seed(a_ )
else:
snake_case_ = torch.Generator(device=a_ ).manual_seed(a_ )
snake_case_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.7_5,
}
return inputs
def lowerCAmelCase__ ( self ) -> Tuple:
'''Run the tiny pipeline on CPU and compare an output slice to reference values.'''
snake_case_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionXLImgaImgPipeline(**a_ )
snake_case_ = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
snake_case_ = self.get_dummy_inputs(a_ )
snake_case_ = sd_pipe(**a_ ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''Delegate to the mixin's attention-slicing test with a looser tolerance.'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''Delegate to the mixin's batch-vs-single consistency test with a looser tolerance.'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ) -> str:
'''Intentionally a no-op — presumably disables an inherited mixin test; TODO confirm.'''
pass
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''Check that passing prompt embeddings directly matches passing raw prompts.'''
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionXLImgaImgPipeline(**a_ )
snake_case_ = sd_pipe.to(a_ )
snake_case_ = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
# forward without prompt embeds
snake_case_ = self.get_dummy_inputs(a_ )
snake_case_ = 3 * ["this is a negative prompt"]
snake_case_ = negative_prompt
snake_case_ = 3 * [inputs["prompt"]]
snake_case_ = sd_pipe(**a_ )
snake_case_ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
snake_case_ = self.get_dummy_inputs(a_ )
snake_case_ = 3 * ["this is a negative prompt"]
snake_case_ = 3 * [inputs.pop("prompt" )]
(
snake_case_
) = sd_pipe.encode_prompt(a_ , negative_prompt=a_ )
snake_case_ = sd_pipe(
**a_ , prompt_embeds=a_ , negative_prompt_embeds=a_ , pooled_prompt_embeds=a_ , negative_pooled_prompt_embeds=a_ , )
snake_case_ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
# NOTE(review): slow/GPU test-suite; same mangling caveats as the class above.
# The teardown method is named `lowerCAmelCase__`, so unittest would NOT call
# it as tearDown — flagged for repair.  Code left byte-identical.
class _snake_case ( unittest.TestCase ):
# Presumably tearDown: free GPU memory between tests — TODO confirm intended name.
def lowerCAmelCase__ ( self ) -> Tuple:
'''Release references and clear the CUDA cache after each test.'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
# Presumably get_inputs(device, dtype=..., seed=0).
# NOTE(review): `torch.floataa` looks like a digit-mangled `torch.float16` — confirm.
def lowerCAmelCase__ ( self , a__ , a__="cpu" , a__=torch.floataa , a__=0 ) -> List[Any]:
'''Return deterministic generator/latents/prompt inputs for a full-size run.'''
snake_case_ = torch.Generator(device=a_ ).manual_seed(a_ )
snake_case_ = np.random.RandomState(a_ ).standard_normal((1, 4, 64, 64) )
snake_case_ = torch.from_numpy(a_ ).to(device=a_ , dtype=a_ )
snake_case_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self ) -> Any:
'''End-to-end check of stable-diffusion-2-base against a reference image slice.'''
snake_case_ = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
snake_case_ = self.get_inputs(a_ )
snake_case_ = pipe(**a_ ).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 400
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# BUG FIX: the original assigned every module-level constant to the same
# mangled name `lowercase__`, clobbering each other.  Restored to the
# conventional transformers doc-constant names (grounded by the surrounding
# section comments) — confirm against the upstream modeling_tf_regnet module.

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv2D + BatchNorm + activation with explicit zero padding (SAME-style).

    BUG FIXES vs. the mangled original: the `__init__` declared duplicate `a_`
    parameters (SyntaxError), the class name is restored to the one sibling
    layers reference (`TFRegNetConvLayer`), the forward method must be `call`
    for Keras, and the digit-mangled API names (`ZeroPaddingaD`, `ConvaD`)
    are restored to the real tf.keras layers.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # No-op activation when None is requested (e.g. last conv of a block).
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: a single strided conv embedding of the input pixel values.

    BUG FIXES vs. the mangled original: duplicate `a_` parameters removed and
    the class/method names restored (`TFRegNetEmbeddings`, Keras `call`).
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        # Only check the channel count eagerly; symbolic tensors may be unknown.
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 strided conv + batch-norm projecting residual inputs to the right shape.

    BUG FIXES vs. the mangled original: duplicate `a_` parameters removed and
    the class/method names restored (`TFRegNetShortCut` is the name the X/Y
    residual layers reference; Keras requires `call`).
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation channel attention (global pool -> 2 convs -> scale).

    BUG FIXES vs. the mangled original: duplicate `a_` parameters removed,
    class/method names restored, and the digit-mangled
    `GlobalAveragePoolingaD`/`ConvaD` restored to the real tf.keras layers.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # Bottleneck down to `reduced_channels`, then back up with a sigmoid gate.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual block: 1x1 -> grouped 3x3 -> 1x1 convs plus a shortcut.

    BUG FIXES vs. the mangled original: duplicate `a_` parameters removed and
    class/method names restored (`TFRegNetXLayer` is the name `TFRegNetStage`
    references; Keras requires `call`).
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        # Project the residual only when the shape changes; otherwise identity.
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class __lowerCamelCase(tf.keras.layers.Layer):
    """RegNet Y residual layer: same as the X layer but with a
    Squeeze-and-Excitation block inserted before the final 1x1 conv."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # SE block squeezes to in_channels/4 as in the reference implementation.
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class __lowerCamelCase(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y layers; only the first layer may
    downsample (stride) / change the channel count."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f'''layers.{i+1}''') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class __lowerCamelCase(tf.keras.layers.Layer):
    """RegNet encoder: a sequence of stages built from `config.hidden_sizes`
    and `config.depths`."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        # Pair consecutive hidden sizes: (h0,h1), (h1,h2), ... for the
        # remaining stages, each with its own depth.
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f'''stages.{i+1}'''))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        """Run all stages; optionally collect each stage's input plus the final
        output in `hidden_states` (NHWC at this point)."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class __lowerCamelCase(tf.keras.layers.Layer):
    """RegNet main layer: embedder -> encoder -> global pooling, with outputs
    transposed to NCHW for parity with the PyTorch implementation."""

    # `keras_serializable` requires this attribute to rebuild the layer from config.
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class __lowerCamelCase(A__):
    """Abstract pretrained-model class: holds the config class, the weight
    prefix, and the serving input signature for RegNet checkpoints."""

    # These attribute names are required by the transformers PreTrainedModel
    # machinery (weight loading / serving signatures).
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NOTE(review): 224x224 here is only the dummy serving shape; the model
        # itself is size-agnostic.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
lowercase__ = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase__ = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""" , A__ , )
class __lowerCamelCase(A__):
    """Bare RegNet model (no head): delegates to the main layer and returns the
    last hidden state plus pooled output."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(a_)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=a_, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , A__ , )
# NOTE(review): the two base classes were collapsed to the same placeholder
# `A__` in this copy (originally the pretrained-model base plus the
# sequence-classification-loss mixin, which provides `hf_compute_loss`).
# Duplicate bases raise TypeError at class creation — restore distinct names.
class __lowerCamelCase(A__, A__):
    """RegNet with an image-classification head (Flatten + Dense over the
    pooled features)."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(a_)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=a_, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        # Loss only when labels are provided (mixin-supplied hf_compute_loss).
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 610
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCamelCase(Dataset):
    """Dataset of tokenized sequences for LM distillation.

    Holds `token_ids` (array of int sequences) and `lengths`, cleans the data
    on construction (splits over-long sequences, drops short / unknown-heavy
    ones) and provides a padding collate function (`batch_sequences`).
    """

    def __init__(self, params, data):
        # params: namespace with max_model_input_size, mlm, special_tok_ids, is_master
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Consistency check: one length per sequence, and lengths are correct."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into chunks,
        re-adding the special start/end tokens on each chunk."""
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f'''Splitting {sum(idx)} too long sequences.''')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the re-inserted cls/sep tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences with 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def remove_unknown_sequences(self):
        """Drop sequences where >=50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) pairs into padded tensors.

        Returns (tk_t, lg_t): LongTensors of shape (bs, max_seq_len_) and (bs).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 702
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and hold out a random test split. These module-level
# names are read by `classifier(...)` in the __main__ guard below.
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between two equal-length points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training samples.

    :param train_data: training samples (sequence of feature vectors)
    :param train_target: class index for each training sample
    :param classes: class names, indexed by class index
    :param point: the sample to classify
    :param k: number of neighbours that vote (default 5)
    :return: the name of the predicted class
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    # Classify a single hand-written iris measurement and print the class name.
    # NOTE(review): in this copy of the file the names `classifier`, `X_train`,
    # `y_train` and `classes` are not bound (the defining statements assign to
    # placeholder names), so running the script as-is raises NameError — the
    # definitions above need their original names restored.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 126
| 0
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """Convert a TF2 token-dropping BERT checkpoint into a PyTorch BertForMaskedLM.

    Reads variables straight from the TF checkpoint (transposing kernels to
    PyTorch's (out, in) layout), copies them into a freshly constructed model,
    and saves the result to `pytorch_dump_path`.
    """

    def get_masked_lm_array(name: str):
        full_name = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orginal_shape):
        # Attention weights are stored per-head in TF; reshape back to the
        # flat PyTorch shape before the kernel transpose.
        full_name = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, """_query_dense/kernel""", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, """_query_dense/bias""", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, """_key_dense/kernel""", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, """_key_dense/bias""", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, """_value_dense/kernel""", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, """_value_dense/bias""", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, """_output_dense/kernel""", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, """_output_dense/bias""", self_output.dense.bias.data.shape)

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, """_attention_layer_norm/gamma""")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, """_attention_layer_norm/beta""")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, """_intermediate_dense/kernel""")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, """_intermediate_dense/bias""")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, """_output_dense/kernel""")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, """_output_dense/bias""")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, """_output_layer_norm/gamma""")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, """_output_layer_norm/beta""")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("""_position_embedding_layer/embeddings""")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("""_type_embedding_layer/embeddings""")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("""_embedding_norm_layer/gamma""")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("""_embedding_norm_layer/beta""")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("""dense/kernel""")
    lm_head.dense.bias.data = get_masked_lm_array("""dense/bias""")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("""layer_norm/gamma""")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("""layer_norm/beta""")

    # Word embeddings are shared with the MLM output embedding table.
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("""embedding_table""")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("""_pooler_layer/kernel""")
    model.bert.pooler.dense.bias.data = get_encoder_array("""_pooler_layer/bias""")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("""Model conversion was done sucessfully!""")
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 63
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Per-checkpoint resource maps for the three DPR tokenizers
# (context encoder, question encoder, reader).
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCAmelCase_(snake_case):
    """DPR context-encoder tokenizer: the BERT tokenizer configured with the
    DPR context-encoder resource maps."""

    # These attribute names are the ones read by the PreTrainedTokenizer base.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase_(snake_case):
    """DPR question-encoder tokenizer: the BERT tokenizer configured with the
    DPR question-encoder resource maps."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Container for one predicted answer span from the DPR reader.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader model outputs: per-passage start/end/relevance logits.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(snake_case )
class UpperCAmelCase_ :
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
elif titles is None or texts is None:
__lowercase : int = titles if texts is None else texts
return super().__call__(
UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles]
__lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts]
__lowercase : str = len(UpperCamelCase_ )
__lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" )
__lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids''']
__lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids''']
__lowercase : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ )
]
}
if return_attention_mask is not False:
__lowercase : str = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowercase : List[str] = attention_mask
return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
def _lowerCamelCase(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4) -> "List[DPRSpanPrediction]":
    """Decode the best answer spans from DPR reader outputs.

    Args:
        reader_input: encoding dict holding ``input_ids`` for every passage.
        reader_output: tuple/sequence whose first three items are
            ``start_logits``, ``end_logits`` and ``relevance_logits``.
        num_spans: maximum number of predictions returned overall.
        max_answer_length: maximum span length passed to ``_get_best_spans``.
        num_spans_per_passage: top spans kept per passage.

    Returns:
        A list of ``DPRSpanPrediction`` objects, best passages first.
    """
    # NOTE: the original obfuscated signature reused one parameter name four
    # times (a SyntaxError); names restored from the visible body references.
    input_ids = reader_input["input_ids"]
    start_logits, end_logits, relevance_logits = reader_output[:3]
    n_passages = len(relevance_logits)
    # Visit passages from most to least relevant.
    sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
    nbest_spans_predictions = []
    for doc_id in sorted_docs:
        sequence_ids = list(input_ids[doc_id])
        # assuming question & title information is at the beginning of the sequence
        passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
        if sequence_ids[-1] == self.pad_token_id:
            sequence_len = sequence_ids.index(self.pad_token_id)
        else:
            sequence_len = len(sequence_ids)
        best_spans = self._get_best_spans(
            start_logits=start_logits[doc_id][passage_offset:sequence_len],
            end_logits=end_logits[doc_id][passage_offset:sequence_len],
            max_answer_length=max_answer_length,
            top_spans=num_spans_per_passage,
        )
        for start_index, end_index in best_spans:
            # Span indices are relative to the passage; shift back to the full sequence.
            start_index += passage_offset
            end_index += passage_offset
            nbest_spans_predictions.append(
                DPRSpanPrediction(
                    span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                    relevance_score=relevance_logits[doc_id],
                    doc_id=doc_id,
                    start_index=start_index,
                    end_index=end_index,
                    text=self.decode(sequence_ids[start_index : end_index + 1]),
                )
            )
        if len(nbest_spans_predictions) >= num_spans:
            break
    return nbest_spans_predictions[:num_spans]
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]:
__lowercase : Tuple = []
for start_index, start_score in enumerate(UpperCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ )
__lowercase : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
__lowercase : Any = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(snake_case )
# Reader tokenizer class: binds the vocabulary/config constants below to the
# behaviour inherited from the two base classes.
# NOTE(review): `snake_case` and the READER_PRETRAINED_* / VOCAB_FILES_NAMES
# constants are not defined in this chunk — presumably module-level names
# elsewhere in the file; confirm they exist.
class UpperCAmelCase_ ( snake_case , snake_case ):
    # Mapping of vocabulary file kinds to their on-disk filenames.
    UpperCamelCase =VOCAB_FILES_NAMES
    # Pretrained-checkpoint lookup tables used by `from_pretrained`.
    UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase =READER_PRETRAINED_INIT_CONFIGURATION
    # Names of the tensors produced by __call__.
    UpperCamelCase =["input_ids", "attention_mask"]
| 76
| 0
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__a : int = logging.get_logger(__name__)
class lowerCamelCase :
    """Process-wide holder for the name of the selected parallel backend."""

    # presumably the joblib backend name (str) set by the context manager
    # below, or None when plain multiprocessing should be used — TODO confirm
    _A : Union[str, Any] = None
@experimental
def A_(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a nested map over `iterable` to the configured backend.

    Uses a multiprocessing pool when no joblib backend is configured,
    otherwise delegates to the joblib implementation.
    """
    # NOTE: the original obfuscated signature reused one parameter name seven
    # times (a SyntaxError); names restored to match the helper call sites.
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def A_(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over contiguous slices of `iterable` using a Pool.

    Each worker receives one tuple ``(function, slice, types, index,
    disable_tqdm, desc)``; the per-slice results are flattened before return.
    """
    # NOTE: the original obfuscated signature reused one parameter name seven
    # times (a SyntaxError) and collapsed the `initargs, initializer` tuple
    # unpacking into a single variable; both restored here.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra element each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock with the workers so progress bars do not interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def A_(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` via the configured joblib backend.

    Progress bars are not supported here, so each work item is built with
    ``disable_tqdm=True`` (the ``None, True, None`` triple below).
    """
    # NOTE: the original obfuscated signature reused one parameter name seven
    # times (a SyntaxError); names restored to match the dispatcher above.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def A_(backend_name):
    """Context manager selecting a joblib backend for parallel map operations.

    Sets ``ParallelBackendConfig.backend_name`` for the duration of the
    ``with`` block and always resets it to ``None`` on exit.
    """
    # BUG FIX: the original assigned the backend name to a throwaway local,
    # so the configured backend was never actually set (or reset).
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 713
|
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class lowerCamelCase:
    """Proxy object that mirrors a module's attributes so they can be patched in place."""

    def __init__(self, module, attrs=None):
        # NOTE: the original obfuscated __init__ duplicated one parameter name
        # (a SyntaxError) while its body read `module`/`attrs`; names restored.
        attrs = attrs or []
        if module is not None:
            # Copy every non-dunder attribute (plus anything explicitly listed
            # in `attrs`) onto this wrapper.
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        # Unwrap nested wrappers so `_original_module` always points at the
        # real underlying module, never at another proxy.
        self._original_module = module._original_module if isinstance(module, lowerCamelCase) else module
class lowerCamelCase :
    """Patch an attribute reachable through a dotted path (e.g. "os.path.join")
    on the globals of `obj`, usable as a context manager or via start()/stop().

    NOTE(review): the obfuscation in this file duplicated parameter names
    (a SyntaxError) and collapsed distinct attribute assignments onto single
    throwaway locals (`snake_case_`); the bodies still reference the original
    names (obj, target, new, attrs, submodule, obj_attr, attr_value,
    target_attr). Comments below describe the evident intent — confirm
    against the upstream implementation before relying on behavior.
    """

    # Class-level registry of patches activated via the start()-style method.
    _A : Union[str, Any] = []

    # Intended signature is presumably (self, obj, target, new, attrs=None).
    def __init__( self: Optional[Any] , snake_case: List[str] , snake_case: str , snake_case: int , snake_case: Dict=None ) -> Any:
        snake_case_ :Union[str, Any] = obj                       # object whose globals get patched
        snake_case_ :List[str] = target                          # dotted path to the attribute
        snake_case_ :Any = new                                   # replacement value
        snake_case_ :Optional[Any] = target.split(""".""" )[0]   # top-level module name
        snake_case_ :Tuple = {}                                  # originals, restored on __exit__
        snake_case_ :List[str] = attrs or []                     # extra attrs to mirror on proxies

    def __enter__( self: Dict ) -> Optional[Any]:
        # Split "a.b.c" into submodules ["a", "b"] and target_attr "c".
        *snake_case_, snake_case_ :List[Any] = self.target.split(""".""" )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(snake_case ) ):
            try:
                snake_case_ :Optional[int] = import_module(""".""".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                snake_case_ :Optional[Any] = getattr(self.obj , snake_case )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(snake_case , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    snake_case_ :int = obj_attr
                    # patch at top level
                    setattr(self.obj , snake_case , _PatchedModuleObj(snake_case , attrs=self.attrs ) )
                    snake_case_ :Optional[Any] = getattr(self.obj , snake_case )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(snake_case , snake_case , _PatchedModuleObj(getattr(snake_case , snake_case , snake_case ) , attrs=self.attrs ) )
                        snake_case_ :int = getattr(snake_case , snake_case )
                    # finally set the target attribute
                    setattr(snake_case , snake_case , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                snake_case_ :Tuple = getattr(import_module(""".""".join(snake_case ) ) , snake_case )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , snake_case ) is attr_value:
                    snake_case_ :Union[str, Any] = getattr(self.obj , snake_case )
                    setattr(self.obj , snake_case , self.new )
        elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
            snake_case_ :Dict = globals()["""__builtins__"""][target_attr]
            setattr(self.obj , snake_case , self.new )
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )

    def __exit__( self: List[Any] , *snake_case: List[Any] ) -> int:
        # Restore every saved original, emptying the registry as we go.
        for attr in list(self.original ):
            setattr(self.obj , snake_case , self.original.pop(snake_case ) )

    # start(): activate the patch without a `with` block and remember it.
    def lowerCAmelCase_ ( self: List[str] ) -> int:
        self.__enter__()
        self._active_patches.append(self )

    # stop(): deactivate a previously started patch; no-op if never started.
    def lowerCAmelCase_ ( self: int ) -> Optional[Any]:
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 310
| 0
|
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    # BUG FIX: the original assigned every value to a single obfuscated name
    # while the code below read REPO_PATH / line / path etc.; names restored.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 693
|
"""simple docstring"""
def UpperCAmelCase ( _lowercase : int = 1_0_0_0 ) -> int:
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = 1, 1
lowerCAmelCase_ = []
for i in range(1 , n + 1 ):
lowerCAmelCase_ = prev_numerator + 2 * prev_denominator
lowerCAmelCase_ = prev_numerator + prev_denominator
if len(str(_lowercase ) ) > len(str(_lowercase ) ):
result.append(_lowercase )
lowerCAmelCase_ = numerator
lowerCAmelCase_ = denominator
return len(_lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 552
| 0
|
def lowerCamelCase__(lowercase = 100):
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first `lowercase` natural numbers.

    >>> lowerCamelCase__(10)
    2640
    """
    # BUG FIX: the loop read an undefined `n`; the parameter is `lowercase`.
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, lowercase + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    # Original printed `solution()`, which does not exist in this file.
    print(f"""{lowerCamelCase__() = }""")
| 707
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Fast tests for the IF image-to-image super-resolution pipeline.

    NOTE(review): several method signatures below reuse one obfuscated
    parameter name twice (a SyntaxError); the bodies show the intended
    parameters were (device, seed=0).
    """

    # Pipeline under test and its parameter sets.
    UpperCamelCase_ : Optional[Any] = IFImgaImgSuperResolutionPipeline
    UpperCamelCase_ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    UpperCamelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    UpperCamelCase_ : str = PipelineTesterMixin.required_optional_params - {'''latents'''}

    # Components come from the shared IF testing mixin.
    def _A ( self : List[Any] ):
        return self._get_superresolution_dummy_components()

    # Build deterministic dummy inputs for one pipeline call.
    def _A ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=0 ):
        if str(UpperCAmelCase_ ).startswith("mps" ):
            # mps does not support device-bound generators.
            SCREAMING_SNAKE_CASE : Any = torch.manual_seed(UpperCAmelCase_ )
        else:
            SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
        # 32x32 low-res input and 16x16 original conditioning image.
        SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE : int = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _A ( self : Tuple ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def _A ( self : Optional[Any] ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _A ( self : List[Any] ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def _A ( self : Optional[int] ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def _A ( self : Tuple ):
        self._test_save_load_local()

    def _A ( self : List[str] ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 488
| 0
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def A_(snake_case):
    """Estimate pi by sampling `snake_case` uniform points in [-1, 1]^2 and
    measuring the fraction that fall inside the unit circle; prints the
    estimate, the reference value and the absolute error."""

    # A local function to see if a dot lands in the circle.
    # NOTE: the original inner def duplicated one parameter name (a
    # SyntaxError); restored to (x, y) as used in its body.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(snake_case)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')
def A_(iterations, function_to_integrate, min_value=0.0, max_value=1.0):
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value] using `iterations` uniform samples.

    NOTE: the original obfuscated signature reused one parameter name four
    times (a SyntaxError); names restored from the evident intent.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def A_(iterations, min_value=0.0, max_value=1.0):
    """Sanity-check the estimator on y = x, whose exact integral over
    [min_value, max_value] is (max^2 - min^2) / 2; prints both values.

    NOTE: the original obfuscated signature duplicated a parameter name
    (a SyntaxError); names restored from the evident intent.
    """

    def identity_function(x: float) -> float:
        return x

    # NOTE(review): `area_under_curve_estimator` is not defined in this file
    # (the estimator above was renamed to `A_` by the obfuscation) — the call
    # name is kept as in the original; confirm against upstream.
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print("******************")
def A_(iterations):
    """Estimate pi as the integral of sqrt(4 - x^2) over [0, 2] (a quarter
    circle of radius 2); prints the estimate, pi, and the absolute error."""

    def function_to_integrate(x: float) -> float:
        # BUG FIX: the original inner function's parameter was obfuscated
        # while the body read `x`; restored.
        return sqrt(4.0 - x * x)

    # NOTE(review): `area_under_curve_estimator` is not defined in this file
    # (renamed to `A_` by the obfuscation) — call name kept as in the
    # original; confirm against upstream.
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143
|
'''simple docstring'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _snake_case ( _a ):
    """Processor that pairs a ViT image processor with a CLIP tokenizer and
    routes text / visual-prompt / image inputs through them.

    NOTE: the original obfuscated class bound all three class attributes to
    one name (`_A`) and collapsed every local onto one name, so `encoding`,
    `prompt_features` and `image_features` were undefined at their uses;
    restored here from the names the visible bodies reference
    (e.g. ``self.image_processor_class`` and ``self.image_processor``).
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only a deprecated alias for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images; `text` and `visual_prompt` are mutually exclusive."""
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    # Forwards to the tokenizer's batch_decode.
    def __UpperCamelCase(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    # Forwards to the tokenizer's decode.
    def __UpperCamelCase(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def __UpperCamelCase(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def __UpperCamelCase(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 143
| 1
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure consumed by _LazyModule: submodule name -> public names.
# BUG FIX: the original bound every structure (and the optional additions
# below) to one obfuscated name, so `_import_structure` — referenced at the
# bottom — was never defined and each optional block overwrote the previous.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules; the
    # original assigned it to a throwaway name instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/diffusers ops deterministic so the pixel checksums below are reproducible.
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
    """Fast (CPU) tests for DiTPipeline.

    NOTE(review): the obfuscation bound every class attribute to one name
    (`SCREAMING_SNAKE_CASE__`) and every local to `_lowerCAmelCase`, while the
    bodies below reference the original names (transformer, vae, scheduler,
    generator, pipe, inputs, image, image_slice, expected_slice,
    max_diff); only the last binding of each collapsed name survives.
    """

    SCREAMING_SNAKE_CASE__ = DiTPipeline
    SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    SCREAMING_SNAKE_CASE__ = False

    # Build tiny dummy components (transformer + VAE + scheduler).
    def __lowerCAmelCase ( self ):
        torch.manual_seed(0 )
        _lowerCAmelCase = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
        _lowerCAmelCase = AutoencoderKL()
        _lowerCAmelCase = DDIMScheduler()
        _lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components

    # Deterministic dummy inputs for one pipeline call.
    def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
        if str(_lowerCAmelCase ).startswith('''mps''' ):
            # mps does not support device-bound generators.
            _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
        else:
            _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
        _lowerCAmelCase = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    # Full forward pass on CPU; compares a 3x3 corner slice to a reference.
    def __lowerCAmelCase ( self ):
        _lowerCAmelCase = '''cpu'''
        _lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        _lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
        _lowerCAmelCase = pipe(**_lowerCAmelCase ).images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        _lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_lowerCAmelCase , 1E-3 )

    def __lowerCAmelCase ( self ):
        self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def __lowerCAmelCase ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for DiTPipeline against reference images.

    NOTE(review): locals are collapsed onto `_lowerCAmelCase` by obfuscation
    while the bodies reference the original names (pipe, words, class_ids,
    generator, images, expected_image); only the last binding survives.
    """

    # Free GPU memory between tests.
    def __lowerCAmelCase ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # 256x256 DiT-XL/2 with DDIM; compare each output to a stored reference.
    def __lowerCAmelCase ( self ):
        _lowerCAmelCase = torch.manual_seed(0 )
        _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        _lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
        _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
        for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
            _lowerCAmelCase = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    # 512x512 DiT-XL/2 with the DPMSolver multistep scheduler.
    def __lowerCAmelCase ( self ):
        _lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        _lowerCAmelCase = ['''vase''', '''umbrella''']
        _lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
        _lowerCAmelCase = torch.manual_seed(0 )
        _lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
        for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
            _lowerCAmelCase = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 664
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.