"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ : List[str] = 'pt'
elif is_tf_available():
__magic_name__ : Dict = 'tf'
else:
__magic_name__ : Optional[int] = 'jax'
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in the common
    # tests because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
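
# Illustrative sketch (not part of the original test module) of the byte-to-id
# scheme the expected values above encode: ByT5 reserves ids 0..2 for
# <pad>/</s>/<unk>, so a utf-8 byte b maps to id b + 3 ("U" is byte 85, hence
# id 88; the euro sign's three utf-8 bytes become 229, 133, 175).
def _byt5_byte_ids(text: str) -> list:
    # pure-python re-derivation of the byte-level ids, excluding the trailing </s> (id 1)
    return [b + 3 for b in text.encode("utf-8")]


assert _byt5_byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49]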
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__magic_name__ : Tuple = 'hf-internal-testing/tiny-random-bert'
__magic_name__ : Dict = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
__magic_name__ : Optional[Any] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class __snake_case (unittest.TestCase ):
def __a ( self: List[Any] ):
__lowerCamelCase = cached_file(A_ , A_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A_ , A_ ) ) )
with open(os.path.join(A_ , """refs""" , """main""" ) ) as f:
__lowerCamelCase = f.read()
self.assertEqual(A_ , os.path.join(A_ , """snapshots""" , A_ , A_ ) )
self.assertTrue(os.path.isfile(A_ ) )
# File is cached at the same place the second time.
__lowerCamelCase = cached_file(A_ , A_ )
self.assertEqual(A_ , A_ )
# Using a specific revision to test the full commit hash.
__lowerCamelCase = cached_file(A_ , A_ , revision="""9b8c223""" )
self.assertEqual(A_ , os.path.join(A_ , """snapshots""" , A_ , A_ ) )
def __a ( self: int ):
with self.assertRaisesRegex(A_ , """is not a valid model identifier""" ):
__lowerCamelCase = cached_file("""tiny-random-bert""" , A_ )
with self.assertRaisesRegex(A_ , """is not a valid git identifier""" ):
__lowerCamelCase = cached_file(A_ , A_ , revision="""aaaa""" )
with self.assertRaisesRegex(A_ , """does not appear to have a file named""" ):
__lowerCamelCase = cached_file(A_ , """conf""" )
def __a ( self: Optional[int] ):
with self.assertRaisesRegex(A_ , """does not appear to have a file named""" ):
__lowerCamelCase = cached_file(A_ , """conf""" )
with open(os.path.join(A_ , """refs""" , """main""" ) ) as f:
__lowerCamelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(A_ , """.no_exist""" , A_ , """conf""" ) ) )
__lowerCamelCase = cached_file(A_ , """conf""" , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
__lowerCamelCase = cached_file(A_ , """conf""" , local_files_only=A_ , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
__lowerCamelCase = mock.Mock()
__lowerCamelCase = 5_00
__lowerCamelCase = {}
__lowerCamelCase = HTTPError
__lowerCamelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=A_ ) as mock_head:
__lowerCamelCase = cached_file(A_ , """conf""" , _raise_exceptions_for_connection_errors=A_ )
self.assertIsNone(A_ )
# This check we did call the fake head request
mock_head.assert_called()
def __a ( self: str ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , A_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , A_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , A_ ) )
def __a ( self: str ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A_ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , A_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A_ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , A_ , revision="""ahaha""" )
__lowerCamelCase = get_file_from_repo("""bert-base-cased""" , A_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
__lowerCamelCase = json.loads(open(A_ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 7_68 )
def __a ( self: Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = Path(A_ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(A_ , """a.txt""" ) , str(A_ ) )
self.assertIsNone(get_file_from_repo(A_ , """b.txt""" ) )
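
# Minimal usage sketch of the helper the tests above exercise (needs network
# access on the first call; the resolved path lands inside the local HF cache):
#
#   from transformers.utils import CONFIG_NAME, cached_file
#   resolved = cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)
#   # -> .../models--hf-internal-testing--tiny-random-bert/snapshots/<commit>/config.json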
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the idealised Casimir equation F = (ℏ c π² A) / (240 d⁴) for whichever
    of the three quantities is passed as 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
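
# Worked example (a sketch; the printed value is approximate): for two 4 m²
# plates 0.03 m apart, F = (ℏ c π² A) / (240 d⁴) ≈ 6.4e-21 N.
if __name__ == "__main__":
    print(casimir_force(force=0, area=4, distance=0.03))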
def solution(n: int = 1000) -> int:
    """
    Return the index of the first term in the Fibonacci sequence to contain
    n digits.
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # count the digits of the new Fibonacci term
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
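
# Sanity check (a sketch; the 1000-digit case is the classic Project Euler 25
# answer, 4782): F(12) = 144 is the first 3-digit Fibonacci number.
assert solution(3) == 12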
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"{prefix}.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"{prefix}.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
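
# Example invocation (a sketch; the script filename is assumed, and `timm`,
# network access and a writable output directory are required):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224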
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
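
# Sketch of what the lazy structure above buys: `transformers.models.nllb`
# imports cheaply, and the concrete tokenizer module is only imported when the
# attribute is first touched (assuming sentencepiece is installed):
#
#   from transformers.models.nllb import NllbTokenizer  # resolved via _LazyModule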
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
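
# Replacement usage, per the warning above (a sketch; the checkpoint id and the
# `image=` argument name follow current diffusers conventions and may vary by version):
#
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   result = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75)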
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
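
# Minimal usage sketch (the ids shown come from RoBERTa's BPE vocabulary, which
# Longformer reuses; exact values depend on the downloaded vocab files):
#
#   from transformers import LongformerTokenizer
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tokenizer("Hello world")["input_ids"]  # e.g. [0, 31414, 232, 2] -> <s> "Hello" " world" </s>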
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape Amazon search results for the given product and collect title, link,
    price, rating, MRP and discount into a DataFrame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[data_frame.index] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # blank out inconsistent rows where the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
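
# Usage sketch: the CSS selectors above track Amazon's live markup, so expect
# best-effort output (rows go blank when the page layout changes):
#
#   df = get_amazon_product_data("headphones")
#   print(df.head())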
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
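
# Example invocation (a sketch; the script and checkpoint filenames are illustrative):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model allenai/longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa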
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
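
# Minimal usage sketch: instantiate the config (optionally enabling rotary
# embeddings on the value projections) and build the matching model from it:
#
#   from transformers import RoFormerConfig, RoFormerModel
#   config = RoFormerConfig(rotary_value=True)
#   model = RoFormerModel(config)  # randomly initialized with the sizes defined above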
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
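
# Smoke-test sketch on a synthetic gradient image, avoiding the cv2 file I/O of
# the __main__ block above (kernel_size=5 leaves a 2-pixel untouched border):
#
#   import numpy as np
#   img = np.tile(np.linspace(0, 1, 32, dtype="float32"), (32, 1))
#   out = bilateral_filter(img, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)
#   print(out.shape)  # (32, 32)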
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for the RegNet trunk that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct hugging face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str , model_func: Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location="cpu" )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert. It must be one of the supported regnet* architectures,'''
            ''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
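    # A minimal invocation sketch (the script filename below is an assumption, not
    # confirmed by this file):
    #
    #   python convert_regnet_seer_10b_to_pytorch.py \
    #       --model_name regnet-y-320-seer \
    #       --pytorch_dump_folder_path ./regnet-dump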
| 138
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> None:
    """Simulates the error raised on a CUDA out-of-memory event."""
    raise RuntimeError("CUDA out of memory." )
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
    def test_with_extra_arg( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function("hello" )
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, "hello"] )
    def test_start_zero( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
    def test_reaches_zero( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
    def test_verbose_guard( self ):
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(1_28 , "hello" , "world" )
        self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
        self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
    def test_any_other_error( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError("Oops, we had an error!" )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
    @require_cuda
    def test_release_memory( self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
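# For reference, a minimal sketch of how `find_executable_batch_size` is typically
# used outside of tests (the training body below is illustrative only):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_function(batch_size):
#       # build dataloaders with `batch_size`; on a CUDA OOM RuntimeError the
#       # decorator retries the whole function with the batch size halved
#       # (128 -> 64 -> 32 -> ...), raising once it reaches zero.
#       ...
#
#   training_function()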
| 138
| 1
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__( self ) -> None:
        self.initialized = False
    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ) -> None:
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
    def init_retrieval( self ) -> None:
        self.retriever.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ) -> None:
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval( self ) -> None:
        logger.info("""initializing retrieval""" )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ):
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
    @classmethod
    def get_tokenizers( cls , retriever_name_or_path , indexed_dataset=None , **kwargs ):
        return super(RagRayDistributedRetriever , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )
    @classmethod
    def from_pretrained( cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs ):
        config = kwargs.pop("""config""" , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = """custom"""
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
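# Minimal usage sketch (the actor setup below is illustrative; in the RAG example
# scripts the workers are created with `ray.remote` and passed in as handles):
#
#   remote_cls = ray.remote(RayRetriever)
#   workers = [remote_cls.remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()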
| 702
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url ( repo_id : str , path : str , revision : Optional[str] = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type="""dataset""" , revision=revision )
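# Minimal usage sketch (the repo id and filename below are placeholders):
#
#   url = hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
#   # -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv"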
| 173
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    model_type = "realm"
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
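# Minimal usage sketch: instantiate with defaults and override a single field.
#
#   config = RealmConfig(num_candidates=4)
#   assert config.num_candidates == 4 and config.model_type == "realm"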
| 104
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ) -> list:
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed() -> None:
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0_0 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 5_0
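# Worked arithmetic behind the assertion above: 100 rows x 8 bytes/row = 800 bytes
# in total; with max_shard_size=16 each partition holds 16 // 8 = 2 rows, so the
# dataframe is split into 800 / 16 = 50 partitions.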
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples() -> None:
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable() -> None:
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle() -> None:
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(3_0 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard() -> None:
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(2_0 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows() -> None:
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_0_0 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
| 541
| 0
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool( string ) -> bool:
        '''Parse the strings "True"/"False" into booleans for argparse.'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''' )
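    # e.g. parse_bool("True") -> True, parse_bool("False") -> False; any other input
    # raises ValueError, which is why it is used as an argparse `type` below instead
    # of the built-in `bool` (bool("False") would be truthy).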
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
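    # A minimal invocation sketch (the script filename and the checkpoint/config
    # paths below are assumptions, not confirmed by this file):
    #
    #   python convert_original_controlnet_to_diffusers.py \
    #       --checkpoint_path ./control_sd15_canny.pth \
    #       --original_config_file ./cldm_v15.yaml \
    #       --dump_path ./controlnet-canny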
| 559
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , input_dims : int = 128 , targets_length : int = 256 , max_decoder_noise_time : float = 2_000.0 , d_model : int = 768 , num_layers : int = 12 , num_heads : int = 12 , d_kv : int = 64 , d_ff : int = 2048 , dropout_rate : float = 0.1 , ) -> None:
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ):
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> None:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_decoder_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E1_0 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_decoder_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> None:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> None:
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> None:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ) -> None:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ):
        # T5-style layer norm: only scales, never shifts (i.e. RMSNorm).
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input : torch.Tensor ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
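# Note: the expression above is the tanh approximation of GELU,
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
# which should agree with torch.nn.functional.gelu(x, approximate="tanh")
# (available in recent PyTorch releases) up to floating-point precision.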
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features ) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
| 559
| 1
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
    """Calculate the built-in voltage of a pn junction (in volts)."""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
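# Worked example (illustrative values, typical for silicon at T = 300 K):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
#   = (kT/q) * ln(Nd * Na / ni^2) ~= 0.02585 V * ln(4.44e13) ~= 0.81 V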
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config( model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ):
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params( hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='imagenet' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='softmax' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''' )
        repo_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(repo_name )
        hf_model.push_to_hub(repo_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
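    # A minimal invocation sketch (the script filename below is an assumption, not
    # confirmed by this file):
    #
    #   python convert_efficientnet_to_pytorch.py --model_name b0 --save_model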
| 559
| 0
|
from collections.abc import Callable
class lowercase :
    def __init__( self , key : Callable | None = None ) -> None:
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
    def _parent( self , i : int ) -> int | None:
        return int((i - 1) / 2 ) if i > 0 else None
    def _left( self , i : int ) -> int | None:
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right( self , i : int ) -> int | None:
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap( self , i : int , j : int ) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self , i : int , j : int ) -> bool:
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self , i : int ) -> int:
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up( self , index : int ) -> None:
        parent = self._parent(index )
        while parent is not None and not self._cmp(index , parent ):
            self._swap(index , parent )
            index , parent = parent, self._parent(parent )
    def _heapify_down( self , index : int ) -> None:
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index , valid_parent )
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item( self , item : int , item_value : int ) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item( self , item : int ) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item( self , item : int , item_value : int ) -> None:
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top( self ) -> tuple | None:
        return self.arr[0] if self.size else None
    def extract_top( self ) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
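# Minimal usage sketch of the heap above (uses the method names restored from the
# call sites in this class):
#
#   heap = lowercase()            # min-heap on the item value by default
#   heap.insert_item(5, 34)
#   heap.insert_item(6, 31)
#   heap.insert_item(7, 37)
#   heap.get_top()                # [6, 31]
#   heap.extract_top()            # [6, 31]; 5 (value 34) becomes the new top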
def __lowerCamelCase ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor( FeatureExtractionMixin ):
    def __init__( self , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
    def pad( self , processed_features : Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , padding : Union[bool, str, PaddingStrategy] = True , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
        # If we have a list of dicts, convert it into a dict of lists.
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                F" to this method that includes {self.model_input_names[0]}, but you provided"
                F" {list(processed_features.keys() )}" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    F"type of {first_element} unknown: {type(first_element )}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object." )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return processed_features
    def _truncate( self , processed_features : Union[Dict[str, np.ndarray], BatchFeature] , max_length : Optional[int] = None , pad_to_multiple_of : Optional[int] = None , truncation : Optional[bool] = None , ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features
def __snake_case( self : Optional[Any] , _UpperCamelCase : int=False , _UpperCamelCase : Tuple=None ) -> Tuple:
'''simple docstring'''
if padding is not False:
if padding is True:
SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = PaddingStrategy(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = padding
else:
SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
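# A minimal, self-contained sketch (not the library code above) of what the
# right-padding branch of `_pad` does for a single 1-D feature sequence:
# extend the values with a padding value and the attention mask with zeros.
# `pad_right`, `max_length=5` and `pad_value` are illustrative choices here,
# not names taken from the source.
import numpy as np

def pad_right(values: np.ndarray, max_length: int, pad_value: float = 0.0):
    difference = max_length - len(values)
    # mask is 1 for real positions, 0 for padding
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=pad_value)
    return padded, attention_mask

padded, mask = pad_right(np.array([0.1, 0.2, 0.3]), max_length=5)
print(padded)  # [0.1 0.2 0.3 0.  0. ]
print(mask)    # [1 1 1 0 0]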
| 647
| 1
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
snake_case_ : Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowercase__ ( snake_case_ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=1 ):
'''simple docstring'''
UpperCamelCase = tokenizer
UpperCamelCase = dataset
UpperCamelCase = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
UpperCamelCase = n_copies
def __iter__( self ):
'''simple docstring'''
UpperCamelCase = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCamelCase = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = start_length
UpperCamelCase = eof_strings
UpperCamelCase = tokenizer
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCamelCase = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
def __snake_case ( _UpperCAmelCase : Optional[int]):
UpperCamelCase = re.split('''(%s)''' % '''|'''.join(_UpperCAmelCase), _UpperCAmelCase)
# last string should be ""
return "".join(string_list[:-2])
def __snake_case ( _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Dict, _UpperCAmelCase : str, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=20, **_UpperCAmelCase : List[str]):
UpperCamelCase = defaultdict(_UpperCAmelCase) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_UpperCAmelCase)):
with torch.no_grad():
UpperCamelCase = batch['''ids'''].shape[-1]
UpperCamelCase = accelerator.unwrap_model(_UpperCAmelCase).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=_UpperCAmelCase, **_UpperCAmelCase)
# each task is generated batch_size times
UpperCamelCase = batch['''task_id'''].repeat(_UpperCAmelCase)
UpperCamelCase = accelerator.pad_across_processes(
_UpperCAmelCase, dim=1, pad_index=tokenizer.pad_token_id)
UpperCamelCase , UpperCamelCase = accelerator.gather((generated_tokens, generated_tasks))
UpperCamelCase = generated_tokens.cpu().numpy()
UpperCamelCase = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_UpperCAmelCase, _UpperCAmelCase):
gen_token_dict[task].append(_UpperCAmelCase)
UpperCamelCase = [[] for _ in range(_UpperCAmelCase)]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCamelCase = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase)
code_gens[task].append(remove_last_block(_UpperCAmelCase))
return code_gens
def __snake_case ( ):
# Setup configuration
UpperCamelCase = HfArgumentParser(_UpperCAmelCase)
UpperCamelCase = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCamelCase = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCamelCase = '''false'''
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCamelCase = Accelerator()
set_seed(args.seed, device_specific=_UpperCAmelCase)
# Load model and tokenizer
UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt)
UpperCamelCase = tokenizer.eos_token
UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
# Generation settings
UpperCamelCase = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, _UpperCAmelCase, _UpperCAmelCase)]),
}
# Load evaluation dataset and metric
UpperCamelCase = load_dataset('''openai_humaneval''')
UpperCamelCase = load_metric('''code_eval''')
UpperCamelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
UpperCamelCase = args.n_samples // args.batch_size
UpperCamelCase = TokenizedDataset(_UpperCAmelCase, human_eval['''test'''], n_copies=_UpperCAmelCase, n_tasks=_UpperCAmelCase)
# do not confuse args.batch_size, which is actually the num_return_sequences
UpperCamelCase = DataLoader(_UpperCAmelCase, batch_size=1)
# Run a quick test to see if code evaluation is enabled
try:
UpperCamelCase = code_eval_metric.compute(references=[''''''], predictions=[['''''']])
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''')
raise exception
UpperCamelCase , UpperCamelCase = accelerator.prepare(_UpperCAmelCase, _UpperCAmelCase)
UpperCamelCase = complete_code(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, n_tasks=_UpperCAmelCase, batch_size=args.batch_size, **_UpperCAmelCase, )
if accelerator.is_main_process:
UpperCamelCase = []
for task in tqdm(range(_UpperCAmelCase)):
UpperCamelCase = human_eval['''test'''][task]['''test''']
UpperCamelCase = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point)
# Evaluate completions with "code_eval" metric
UpperCamelCase , UpperCamelCase = code_eval_metric.compute(
references=_UpperCAmelCase, predictions=_UpperCAmelCase, num_workers=args.num_workers)
print(f'Results: {pass_at_k}')
# Save results to json file
with open(args.output_file, '''w''') as fp:
json.dump(_UpperCAmelCase, _UpperCAmelCase)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
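# A small sketch of the unbiased pass@k estimator that the `code_eval` metric
# reports (Chen et al., 2021): given n generated samples for a task, of which
# c pass the unit tests, pass@k = 1 - C(n - c, k) / C(n, k). This illustrates
# the statistic only; it is not the metric's internal implementation.
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

print(round(pass_at_k(n=200, c=10, k=1), 2))   # ~0.05
print(round(pass_at_k(n=200, c=10, k=10), 2))  # ~0.41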
| 212
|
'''simple docstring'''
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
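# Quick worked check of the two primitives above: `apply_table` reads
# 1-indexed positions out of a bit string, and `xor` combines two bit strings
# position by position. The values below are worked out by hand.
assert apply_table("1010", [2, 4, 3, 1]) == "0011"  # bits 2, 4, 3, 1 of "1010"
assert xor("1010", "0110") == "1100"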
| 212
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Dict =logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] ={"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase__ : List[Any] ={
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
UpperCAmelCase__ : Tuple ={
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
@lru_cache()
def _lowercase ( ) -> Optional[Any]:
lowerCamelCase =(
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowerCamelCase =bs[:]
lowerCamelCase =0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase =[chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def _lowercase ( _UpperCAmelCase ) -> Tuple:
lowerCamelCase =set()
lowerCamelCase =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase =char
return pairs
class __A ( __lowerCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="replace" , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=False , **UpperCAmelCase_ , ):
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding="""utf-8""" ) as vocab_handle:
lowerCamelCase =json.load(lowerCamelCase__ )
lowerCamelCase ={v: k for k, v in self.encoder.items()}
lowerCamelCase =errors # how to handle errors in decoding
lowerCamelCase =bytes_to_unicode()
lowerCamelCase ={v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding="""utf-8""" ) as merges_handle:
lowerCamelCase =merges_handle.read().split("""\n""" )[1:-1]
lowerCamelCase =[tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
lowerCamelCase ={}
lowerCamelCase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase =re.compile(r"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCAmelCase_ ):
if token in self.cache:
return self.cache[token]
lowerCamelCase =tuple(lowerCamelCase__ )
lowerCamelCase =get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
lowerCamelCase =min(lowerCamelCase__ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(lowerCamelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase =bigram
lowerCamelCase =[]
lowerCamelCase =0
while i < len(lowerCamelCase__ ):
try:
lowerCamelCase =word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase =j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase =tuple(lowerCamelCase__ )
lowerCamelCase =new_word
if len(lowerCamelCase__ ) == 1:
break
else:
lowerCamelCase =get_pairs(lowerCamelCase__ )
lowerCamelCase =''' '''.join(lowerCamelCase__ )
lowerCamelCase =word
return word
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =[]
for token in re.findall(self.pat , lowerCamelCase__ ):
lowerCamelCase =''''''.join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(""" """ ) )
return bpe_tokens
def _snake_case ( self , UpperCAmelCase_ ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCAmelCase_ ):
return self.decoder.get(lowerCamelCase__ )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase =''''''.join(lowerCamelCase__ )
lowerCamelCase =bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase =os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase =os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + """\n""" )
lowerCamelCase =0
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCamelCase =token_index
writer.write(""" """.join(lowerCamelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase =[self.cls_token_id]
lowerCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCamelCase =[self.sep_token_id]
lowerCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False , **UpperCAmelCase_ ):
lowerCamelCase =kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
lowerCamelCase =''' ''' + text
return (text, kwargs)
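# A self-contained sketch of the byte-level trick the tokenizer above relies
# on: every possible byte is given a printable unicode stand-in, so arbitrary
# text round-trips through the BPE merge table without raw control bytes.
# `bytes_to_unicode_demo` is an illustrative re-derivation, not the class code.
def bytes_to_unicode_demo():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

print(bytes_to_unicode_demo()[ord(" ")])  # 'Ġ' -- the visible stand-in for a space byte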
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : str ={'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] =['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any =['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict =[
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 269
| 0
|
'''simple docstring'''
def all_characters_unique(input_str: str) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
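# Short demo of the bitmask trick above (using the restored, assumed name
# `all_characters_unique`): each character sets one bit, so a repeat is
# detected the moment its bit is already on.
print(all_characters_unique("abcde"))  # True
print(all_characters_unique("abcda"))  # False -- the bit for 'a' is already set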
| 390
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase_ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
UpperCAmelCase_ = F"""{src_lang}-{tgt_lang}"""
UpperCAmelCase_ = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''README.md''' )
print(F"""Generating {path}""" )
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
lowercase__ : Dict = Path(__file__).resolve().parent.parent.parent
lowercase__ : Any = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
lowercase__ : str = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 390
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : int = {"vocab_file": "spiece.model"}
_lowerCAmelCase : Tuple = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<sep>" , lowerCamelCase="<pad>" , lowerCamelCase="<cls>" , lowerCamelCase="<mask>" , lowerCamelCase=["<eop>", "<eod>"] , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : Tuple = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase , remove_space=lowerCamelCase , keep_accents=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , additional_special_tokens=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : Any = 3
snake_case__ : Dict = do_lower_case
snake_case__ : str = remove_space
snake_case__ : int = keep_accents
snake_case__ : Tuple = vocab_file
snake_case__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
snake_case__ : Dict = jieba
snake_case__ : int = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return len(self.sp_model )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[int] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = self.__dict__.copy()
snake_case__ : str = None
return state
def __setstate__( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : str = {}
snake_case__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if self.remove_space:
snake_case__ : Optional[int] = ''' '''.join(inputs.strip().split() )
else:
snake_case__ : Union[str, Any] = inputs
snake_case__ : Tuple = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
snake_case__ : Tuple = unicodedata.normalize('''NFKD''' , lowerCamelCase )
snake_case__ : int = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase )] )
if self.do_lower_case:
snake_case__ : Any = outputs.lower()
return outputs
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.preprocess_text(lowerCamelCase )
snake_case__ : Tuple = self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
snake_case__ : List[str] = []
for piece in pieces:
if len(lowerCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
snake_case__ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case__ : Dict = cur_pieces[1:]
else:
snake_case__ : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase )
else:
new_pieces.append(lowerCamelCase )
return new_pieces
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return self.sp_model.PieceToId(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Optional[int] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : Optional[int] = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1, 1]
return ([0] * len(lowerCamelCase )) + [1, 1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : Dict = [self.sep_token_id]
snake_case__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Union[str, Any] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def lowercase__ ( self , *lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = super()._decode(*lowerCamelCase , **lowerCamelCase )
snake_case__ : Any = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
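# Hedged usage sketch for the tokenizer above: it jieba-segments Chinese text
# and maps " " / "\n" to U+2582 / U+2583 before SentencePiece, then reverses
# the mapping in `_decode`. Loading it the usual way (checkpoint name taken
# from the vocab map at the top of this file):
#
#     from transformers import CpmTokenizer
#     tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")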
| 694
|
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694
| 1
|
"""simple docstring"""
from collections import deque
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
lowercase__ : int = process_name # process name
lowercase__ : Dict = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ : int = arrival_time
lowercase__ : Any = burst_time # remaining burst time
lowercase__ : List[str] = 0 # total time of the process wait in ready queue
lowercase__ : Union[str, Any] = 0 # time from arrival time to completion time
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Dict:
lowercase__ : Dict = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ : int = time_slices
# unfinished process is in this ready_queue
lowercase__ : List[Any] = queue
# current time
lowercase__ : Dict = current_time
# finished process is in this sequence queue
lowercase__ : deque[Process] = deque()
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : List[Any] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Union[str, Any]:
lowercase__ : str = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
lowercase__ : List[str] = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Any:
lowercase__ : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Dict:
return [q.burst_time for q in queue]
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Dict:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
lowercase__ : deque[Process] = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE__ ) != 0:
lowercase__ : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ : str = 0
# set the process's turnaround time because it is finished
lowercase__ : str = self.current_time - cp.arrival_time
# set the completion time
lowercase__ : Optional[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE__ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
lowercase__ : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
lowercase__ : str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ : Union[str, Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ : Optional[int] = 0
# set the finish time
lowercase__ : Dict = self.current_time
# update the process' turnaround time because it is finished
lowercase__ : Tuple = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE__ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCAmelCase__( self ) -> List[str]:
for i in range(self.number_of_queues - 1 ):
lowercase__ : Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case = Process('P1', 0, 53)
__snake_case = Process('P2', 0, 17)
__snake_case = Process('P3', 0, 68)
__snake_case = Process('P4', 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
__snake_case = Process('P1', 0, 53)
__snake_case = Process('P2', 0, 17)
__snake_case = Process('P3', 0, 68)
__snake_case = Process('P4', 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
__snake_case = MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
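# Rough hand trace of the configuration above (a sketch, not captured output):
# queue 0 is round robin with slice 17 (P2 finishes), queue 1 is round robin
# with slice 25 (P4 finishes, having 7 units left over from queue 0), and the
# final FCFS queue completes P1 and P3, giving the finish sequence
# P2, P4, P1, P3 for burst times 53, 17, 68, 24.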
| 200
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 47
| 0
|
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
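# Worked example of the trial-division loop above: for n = 13195 the prime
# factors peel off as 5, 7, 13, 29, so the answer is 29. The default argument
# is Project Euler problem 3's n = 600851475143, whose largest prime factor
# is 6857.
print(solution(13195))  # 29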
| 226
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
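# Quick usage of the solver above: the zero-valued quantity is derived from
# the other two via V = I * R.
print(ohms_law(voltage=0, current=2.0, resistance=5.0))   # {'voltage': 10.0}
print(ohms_law(voltage=10.0, current=0, resistance=5.0))  # {'current': 2.0}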
| 226
| 1
|
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """simple docstring"""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """simple docstring"""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """simple docstring"""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """simple docstring"""
    return round(tf * idf, 3)
| 584
|
"""simple docstring"""
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
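# Sanity check: f(x) = 10 - x*x changes sign on both intervals above, so each
# call converges to +sqrt(10) ~ 3.1623 within the 0.01 stopping width.
print(abs(bisection(-2, 5) - 10 ** 0.5) < 0.01)  # True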
| 584
| 1
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
lowerCamelCase__: int =self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "width_multiplier"))
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=13 , UpperCAmelCase_ : Any=64 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Tuple="swish" , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=0.25 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Dict=0.0 , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =parent
lowerCamelCase__: Optional[Any] =batch_size
lowerCamelCase__: Optional[Any] =image_size
lowerCamelCase__: List[str] =patch_size
lowerCamelCase__: Any =num_channels
lowerCamelCase__: int =make_divisible(512 * width_multiplier , divisor=8)
lowerCamelCase__: str =hidden_act
lowerCamelCase__: Dict =conv_kernel_size
lowerCamelCase__: int =output_stride
lowerCamelCase__: Optional[int] =classifier_dropout_prob
lowerCamelCase__: Optional[Any] =use_labels
lowerCamelCase__: Optional[int] =is_training
lowerCamelCase__: Optional[Any] =num_labels
lowerCamelCase__: Any =initializer_range
lowerCamelCase__: Optional[int] =scope
lowerCamelCase__: str =width_multiplier
lowerCamelCase__: int =ffn_dropout
lowerCamelCase__: Tuple =attn_dropout
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Optional[Any] =None
lowerCamelCase__: str =None
if self.use_labels:
lowerCamelCase__: Tuple =ids_tensor([self.batch_size] , self.num_labels)
lowerCamelCase__: Dict =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
lowerCamelCase__: str =self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =MobileViTVaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: str =model(UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.num_labels
lowerCamelCase__: str =MobileViTVaForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.num_labels
lowerCamelCase__: Dict =MobileViTVaForSemanticSegmentation(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: List[str] =model(UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__: List[Any] =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.prepare_config_and_inputs()
lowerCamelCase__: Union[str, Any] =config_and_inputs
lowerCamelCase__: int ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =MobileViTVaModelTester(self)
lowerCamelCase__: List[Any] =MobileViTVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions")
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
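    # Hedged usage sketch (added for illustration, not part of the original test
    # suite): the integration tests above reduce to this inference recipe.
    # `_example_segmentation` is a hypothetical helper name.
    def _example_segmentation(self, image_path):
        image = Image.open(image_path)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        inputs = image_processor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        # Resize the predicted class map back to the input size (height, width).
        return image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]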
| 720
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"
    def __init__(
        self,
        hidden_size: int = 1_408,
        intermediate_size: int = 6_144,
        num_hidden_layers: int = 39,
        num_attention_heads: int = 16,
        image_size: int = 224,
        patch_size: int = 14,
        hidden_act: str = "gelu",
        layer_norm_eps: float = 0.00001,
        attention_dropout: float = 0.0,
        initializer_range: float = 1e-10,
        qkv_bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"
    def __init__(
        self,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        cross_attention_frequency: int = 2,
        encoder_hidden_size: int = 1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens: int = 32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: BlipaVisionConfig,
        qformer_config: BlipaQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
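# Hedged usage sketch (added for illustration): composing a full BLIP-2 config
# from its three sub-configs via the classmethod above. `OPTConfig` is the text
# backbone the __init__ falls back to; the import path is assumed.
def _example_compose_config():
    from transformers import OPTConfig
    vision_config = BlipaVisionConfig()
    qformer_config = BlipaQFormerConfig()
    text_config = OPTConfig()
    return BlipaConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)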
| 437
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
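    # Hedged usage sketch (illustrative, not part of the original tests): the
    # iterator-streamer pattern exercised above is how an application would
    # consume tokens as they are generated on a background thread.
    def _example_streaming(self, model, tokenizer, prompt):
        inputs = tokenizer(prompt, return_tensors="pt")
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
        thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 50, "streamer": streamer})
        thread.start()
        text = ""
        for chunk in streamer:
            text += chunk
        thread.join()
        return text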
| 38
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")
    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")
    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")
    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")
    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")
    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")
    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 38
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # The main process creates one extra element, so every other process has to pad.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now this check only runs on two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    # For now this check only runs on two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
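# Hedged usage note (illustrative): this script is meant to be run under a
# multi-process launcher so the collectives above have peers, e.g. (assuming
# the file is saved as test_ops.py):
#   accelerate launch --num_processes 2 test_ops.py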
| 706
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
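    # The two composition comments above are left unimplemented in this file;
    # below is a hedged sketch of both, written directly with numpy. `R1` and
    # `R2` are illustrative fuzzy relations built from the two sets above.
    R1 = np.minimum.outer(young, middle_aged)  # relation X -> Y
    R2 = np.minimum.outer(middle_aged, young)  # relation Y -> Z
    # max-min composition: T[i, k] = max_j min(R1[i, j], R2[j, k])
    max_min = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
    # max-product composition: T[i, k] = max_j (R1[i, j] * R2[j, k])
    max_product = np.max(R1[:, :, None] * R2[None, :, :], axis=1)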
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 0
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Density of the normal distribution with mean ``mu`` and standard deviation ``sigma`` at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
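    # Hedged sanity check (added for illustration): the standard normal density
    # at its mean equals 1 / sqrt(2 * pi).
    assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12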
| 71
|
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__snake_case ="""\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__snake_case ="""\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__snake_case ="""
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 133
| 0
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
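# Hedged usage sketch (illustrative): with this hubconf at the repository root,
# torch.hub can load the entry points defined above by name, e.g.:
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")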
| 704
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_A: str = """Create a default config file for Accelerate with only a few flags set."""
def _lowerCAmelCase ( _lowerCAmelCase="no" , _lowerCAmelCase = default_json_config_file , _lowerCAmelCase = False )-> List[Any]:
__UpperCAmelCase = Path(_lowerCAmelCase )
path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
__UpperCAmelCase = num_gpus
__UpperCAmelCase = False
if num_gpus > 1:
__UpperCAmelCase = 'MULTI_GPU'
else:
__UpperCAmelCase = 'NO'
elif is_xpu_available() and use_xpu:
__UpperCAmelCase = torch.xpu.device_count()
__UpperCAmelCase = num_xpus
__UpperCAmelCase = False
if num_xpus > 1:
__UpperCAmelCase = 'MULTI_XPU'
else:
__UpperCAmelCase = 'NO'
elif is_npu_available():
__UpperCAmelCase = torch.npu.device_count()
__UpperCAmelCase = num_npus
__UpperCAmelCase = False
if num_npus > 1:
__UpperCAmelCase = 'MULTI_NPU'
else:
__UpperCAmelCase = 'NO'
else:
__UpperCAmelCase = 0
__UpperCAmelCase = True
__UpperCAmelCase = 1
__UpperCAmelCase = 'NO'
__UpperCAmelCase = ClusterConfig(**_lowerCAmelCase )
config.to_json_file(_lowerCAmelCase )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
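# Hedged usage note (illustrative): `write_basic_config` can also be called
# directly, without the CLI subcommand defined above, e.g. from a notebook:
#
#   from accelerate.utils import write_basic_config
#   write_basic_config(mixed_precision="fp16")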
| 617
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
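# For example (hedged, added for illustration), camel_case_split("TFBertModel")
# yields ["TF", "Bert", "Model"], which the backend lookup below relies on.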
def get_frameworks_table() -> "pd.DataFrame":
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[str]:
__UpperCAmelCase =[
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase =[model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        __UpperCAmelCase =[auto_class, f"""TF{auto_class}""", f"""Flax{auto_class}"""]  # class names (unlike the mapping constants) have no underscore after the framework prefix
# Loop through all three frameworks
for module, cls, mapping in zip(snake_case__ , snake_case__ , snake_case__ ):
# The type of pipeline may not exist in this framework
if not hasattr(snake_case__ , snake_case__ ):
continue
# First extract all model_names
__UpperCAmelCase =[]
for name in getattr(snake_case__ , snake_case__ ).values():
if isinstance(snake_case__ , snake_case__ ):
model_names.append(snake_case__ )
else:
model_names.extend(list(snake_case__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Any:
__UpperCAmelCase =get_frameworks_table()
__UpperCAmelCase =Dataset.from_pandas(snake_case__ )
__UpperCAmelCase =hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=snake_case__ )
__UpperCAmelCase =Dataset.from_json(snake_case__ )
__UpperCAmelCase ={
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(snake_case__ ) )
}
__UpperCAmelCase =update_pipeline_and_auto_class_table(snake_case__ )
    # Sort the model classes so that nondeterministic ordering doesn't create spurious update commits.
__UpperCAmelCase =sorted(table.keys() )
__UpperCAmelCase =pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase =Dataset.from_pandas(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(snake_case__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(snake_case__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase =(
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase ='''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=snake_case__ , repo_type='''dataset''' , token=snake_case__ , commit_message=snake_case__ , )
def SCREAMING_SNAKE_CASE ( ) -> str:
__UpperCAmelCase ={tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase =transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase =[]
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase =pipeline_tasks[key]['''pt''']
if isinstance(snake_case__ , (list, tuple) ):
__UpperCAmelCase =model[0]
__UpperCAmelCase =model.__name__
if model not in in_table.values():
missing.append(snake_case__ )
if len(snake_case__ ) > 0:
__UpperCAmelCase =''', '''.join(snake_case__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
UpperCamelCase_ = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
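# Usage sketch (this is the upstream `utils/update_metadata.py` maintenance
# script): verify that every pipeline task has an entry without pushing,
#
#   python utils/update_metadata.py --check-only
#
# or push refreshed frameworks.json / pipeline_tags.json for a given commit:
#
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>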
| 132
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Union[str, Any] = '''convbert'''
def __init__(self , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=2 , UpperCAmelCase=9 , UpperCAmelCase=1 , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =embedding_size
__UpperCAmelCase =head_ratio
__UpperCAmelCase =conv_kernel_size
__UpperCAmelCase =num_groups
__UpperCAmelCase =classifier_dropout
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
@property
def A__ (self):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCAmelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
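# Minimal usage sketch (upstream these classes are `ConvBertConfig` and
# `ConvBertOnnxConfig`; the names here are obfuscated): build a default config
# and inspect the ONNX input axes declared above.
#
#   config = ConvBertConfig()                  # vocab_size=30522, hidden_size=768, ...
#   onnx_config = ConvBertOnnxConfig(config)
#   list(onnx_config.inputs)                   # ['input_ids', 'attention_mask', 'token_type_ids']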
| 132
| 1
|
def _a ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
_lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = (
first_str_length if first_str_length > second_str_length else second_str_length
)
_lowerCAmelCase = []
for char_count in range(__SCREAMING_SNAKE_CASE ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
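# Worked example for the interleaver above: characters are taken alternately
# from the two strings and the leftover tail of the longer one is appended,
# so the __main__ call prints "AXBYZ".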
| 702
|
from __future__ import annotations
_UpperCamelCase: Dict =8.9_88e9 # units = N * m^2 * C^-2
def _a ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
_lowerCAmelCase = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if distance < 0:
raise ValueError('Distance cannot be negative' )
if force == 0:
_lowerCAmelCase = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
_lowerCAmelCase = abs(__SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
_lowerCAmelCase = abs(__SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
_lowerCAmelCase = (COULOMBS_CONSTANT * charge_product / abs(__SCREAMING_SNAKE_CASE )) ** 0.5
return {"distance": distance}
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
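# Worked example for the solver above: pass exactly one argument as 0 and the
# function solves for it. With unit charges at a distance of 1 m (force given
# as the zero), the result is Coulomb's constant itself:
#
#   {'force': 8.988e9}   # k * 1 * 1 / 1**2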
| 585
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : List[Any]=18 , UpperCAmelCase : Optional[int]=30 , UpperCAmelCase : Any=400 , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=True , ):
A_ = size if size is not None else {"height": 18, "width": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = apply_ocr
def __A ( self : str ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __A ( self : Any ):
A_ = LayoutLMvaImageProcessingTester(self )
@property
def __A ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "apply_ocr" ) )
def __A ( self : Any ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : Optional[int] ):
pass
def __A ( self : List[str] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase )
self.assertIsInstance(encoding.boxes , UpperCAmelCase )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Union[str, Any] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Dict ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Tuple ):
# with apply_OCR = True
A_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
A_ = Image.open(ds[0]["file"] ).convert("RGB" )
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
A_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase )
self.assertListEqual(encoding.boxes , UpperCAmelCase )
# with apply_OCR = False
A_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase )
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 86
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
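# Usage note (standard `_LazyModule` behaviour in transformers): the heavy
# submodules above are only imported on first attribute access, e.g.
#
#   from transformers import TapasConfig     # cheap: config module only
#   from transformers import TapasModel      # triggers the torch import path
#
# If torch / TF is unavailable, the corresponding names are simply absent
# instead of failing at import time.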
| 86
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE__ : int = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
SCREAMING_SNAKE_CASE__ : Any = logging.WARNING
def _A ( ):
    a__ : Any = os.getenv("DATASETS_VERBOSITY" , None )  # upstream default is None
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def _A ( ):
return __name__.split("." )[0]
def _A ( ):
return logging.getLogger(_get_library_name() )
def _A ( ):
# Apply our default configuration to the library root logger.
a__ : List[str] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def _A ( ):
a__ : Union[str, Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def _A ( lowerCamelCase = None ):
if name is None:
a__ : Union[str, Any] = _get_library_name()
return logging.getLogger(lowerCamelCase )
def _A ( ):
return _get_library_root_logger().getEffectiveLevel()
def _A ( lowerCamelCase ):
_get_library_root_logger().setLevel(lowerCamelCase )
def _A ( ):
    return set_verbosity(INFO )  # upstream: set_verbosity_info
def _A ( ):
    return set_verbosity(WARNING )  # upstream: set_verbosity_warning
def _A ( ):
    return set_verbosity(DEBUG )  # upstream: set_verbosity_debug
def _A ( ):
    return set_verbosity(ERROR )  # upstream: set_verbosity_error
def _A ( ):
    a__ : Dict = False  # upstream: _get_library_root_logger().propagate = False (disable_propagation)
def _A ( ):
    a__ : List[str] = True  # upstream: enable_propagation
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __lowerCAmelCase :
def __init__( self , *snake_case , **snake_case ) -> Optional[Any]: # pylint: disable=unused-argument
"""simple docstring"""
a__ : List[Any] = args[0] if args else None
def __iter__( self ) -> Tuple:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , snake_case ) -> Union[str, Any]:
"""simple docstring"""
def empty_fn(*snake_case , **snake_case ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
return
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
class __lowerCAmelCase :
def __call__( self , *snake_case , snake_case=False , **snake_case ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*snake_case , **snake_case )
else:
return EmptyTqdm(*snake_case , **snake_case )
def _snake_case ( self , *snake_case , **snake_case ) -> Dict:
"""simple docstring"""
a__ : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case , **snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE__ : Optional[int] = _tqdm_cls()
def _A ( ):
global _tqdm_active
return bool(_tqdm_active )
def _A ( ):
global _tqdm_active
a__ : str = True
def _A ( ):
global _tqdm_active
a__ : Union[str, Any] = False
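# Usage sketch for the helpers above (the defs are obfuscated stand-ins for the
# upstream `datasets.logging` helpers; the exact upstream entry points are an
# assumption here):
#
#   import datasets
#   datasets.logging.set_verbosity_error()   # only show errors
#   datasets.disable_progress_bar()          # swap tqdm for the no-op EmptyTqdm class
#   datasets.enable_progress_bar()           # restore real tqdm bars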
| 629
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
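# Worked example for the unraveling helper above (def renamed `_A` here): it is
# the inverse of a C-order flattening, matching numpy.unravel_index. With dims
# (2, 3) and flat index 4: 4 % 3 = 1, 4 // 3 = 1, then 1 % 2 = 1, so the
# multi-index is (1, 1).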
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Optional[int] = t.shape[:no_batch_dims]
a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
# _get_minimal_slice_set is inclusive
a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
# Get an ordered list of slices to perform
a__ : str = _get_minimal_slice_set(
lowerCamelCase , lowerCamelCase , lowerCamelCase , )
a__ : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
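# Worked intuition for the chunk slicer above (def renamed `_A` here): for a
# tensor whose leading `no_batch_dims` dimensions form the batch, it returns
# the same result as t.reshape(-1, *t.shape[no_batch_dims:])[flat_start:flat_end],
# but via a minimal set of slices instead of materializing the full reshape.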
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
if not (len(lowerCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
a__ : str = None
if _out is not None:
a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
a__ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
a__ : str = 0
a__ : Any = prepped_outputs
for _ in range(lowerCamelCase ):
# Chunk the input
if not low_mem:
a__ : str = _select_chunk
else:
a__ : Tuple = partial(
_chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
# Run the layer on the chunk
a__ : Any = layer(**lowerCamelCase )
# Allocate space for the output
if out is None:
a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase , lowerCamelCase ):
def assign(lowerCamelCase , lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
assign(lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
a__ : Dict = da[k]
assign(lowerCamelCase , lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
a__ : Dict = xa
elif isinstance(lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
a__ : Dict = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
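# Usage sketch for the tuner class above (upstream this is `ChunkSizeTuner`
# with a `tune_chunk_size(fn, args, min_chunk_size)` entry point; the names
# here are obfuscated): it binary-searches power-of-two chunk sizes, keeps the
# largest one that runs without a RuntimeError (e.g. CUDA OOM), and caches the
# result until the argument shapes change.
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(layer_fn, (activations,), min_chunk_size=1)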
| 629
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 122
|
"""simple docstring"""
from math import sqrt
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
lowercase_ : List[Any] = True
# 0 and 1 are none primes.
if number <= 1:
lowercase_ : List[Any] = False
for divisor in range(2 , int(round(sqrt(__SCREAMING_SNAKE_CASE ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
lowercase_ : Union[str, Any] = False
break
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'status' must been from type bool"
return status
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase_ : int = list(range(2 , n + 1 ) )
    lowercase_ : List[Any] = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(__SCREAMING_SNAKE_CASE ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase_ : List[str] = 0
# filters actual prime numbers.
lowercase_ : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
lowercase_ : Optional[int] = []
    # iterate over all numbers from 2 up to N (inclusive);
    # if a number is prime it is appended to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__SCREAMING_SNAKE_CASE ):
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0"
    lowercase_ : Union[str, Any] = [] # this list will be returned by the function.
# potential prime number factors.
lowercase_ : Union[str, Any] = 2
lowercase_ : int = number
if number == 0 or number == 1:
ans.append(__SCREAMING_SNAKE_CASE )
    # if 'number' is not prime, build the prime factorization of 'number'
elif not is_prime(__SCREAMING_SNAKE_CASE ):
while quotient != 1:
if is_prime(__SCREAMING_SNAKE_CASE ) and (quotient % factor == 0):
ans.append(__SCREAMING_SNAKE_CASE )
quotient /= factor
else:
factor += 1
else:
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowercase_ : Tuple = prime_factorization(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = max(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase_ : str = 0
# prime factorization of 'number'
lowercase_ : str = prime_factorization(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = min(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , __SCREAMING_SNAKE_CASE ), "compare must been from type bool"
return number % 2 == 0
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , __SCREAMING_SNAKE_CASE ), "compare must been from type bool"
return number % 2 != 0
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(__SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
    lowercase_ : Optional[int] = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase_ : Dict = get_prime_numbers(__SCREAMING_SNAKE_CASE )
lowercase_ : int = len(__SCREAMING_SNAKE_CASE )
# run variable for while-loops.
lowercase_ : str = 0
lowercase_ : str = None
    # exit variable, used to break out of the loops
lowercase_ : str = True
while i < len_pn and loop:
lowercase_ : Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase_ : List[str] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (len(__SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
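# Worked example for the Goldbach helper above (def renamed here): the double
# loop returns the first prime pair, ordered by the smaller prime, summing to
# the input, e.g.
#
#   goldbach(28)  ->  [5, 23]    # 5 + 23 == 28, both prime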
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
    # Euclid's algorithm; the intended loop (upstream variable names) is:
    #   rest = number1 % number2; number1 = number2; number2 = rest
    lowercase_ : Tuple = 0
    while numbera != 0:
        lowercase_ : List[str] = numbera % numbera
        lowercase_ : str = numbera
        lowercase_ : int = rest
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase_ : List[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase_ : int = prime_factorization(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = prime_factorization(__SCREAMING_SNAKE_CASE )
elif numbera == 1 or numbera == 1:
lowercase_ : str = []
lowercase_ : Optional[int] = []
lowercase_ : int = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : List[Any] = 0
    lowercase_ : int = [] # captures numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase_ : Tuple = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
lowercase_ : int = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ):
ans *= n
else:
lowercase_ : Dict = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE ):
ans *= n
done.append(__SCREAMING_SNAKE_CASE )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase_ : Any = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE ):
ans *= n
done.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int"
lowercase_ : List[Any] = 0
lowercase_ : Any = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__SCREAMING_SNAKE_CASE ):
ans += 1
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and is_prime(
__SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
assert (
is_prime(__SCREAMING_SNAKE_CASE ) and is_prime(__SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase_ : Union[str, Any] = p_number_a + 1 # jump to the next number
    lowercase_ : int = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__SCREAMING_SNAKE_CASE ):
number += 1
while number < p_number_a:
ans.append(__SCREAMING_SNAKE_CASE )
number += 1
# fetch the next prime number.
while not is_prime(__SCREAMING_SNAKE_CASE ):
number += 1
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and ans[0] != p_number_a
and ans[len(__SCREAMING_SNAKE_CASE ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1"
lowercase_ : str = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
    assert ans[0] == 1 and ans[len(__SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisors(...)"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase_ : Any = get_divisors(__SCREAMING_SNAKE_CASE )
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (divisors[0] == 1)
and (divisors[len(__SCREAMING_SNAKE_CASE ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase_ : List[Any] = gcd(abs(__SCREAMING_SNAKE_CASE ) , abs(__SCREAMING_SNAKE_CASE ) )
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been a int and >= 0"
    lowercase_ : Union[str, Any] = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been an int and >= 0"
lowercase_ : List[Any] = 0
lowercase_ : Optional[int] = 1
    lowercase_ : int = 1 # this will be returned
for _ in range(n - 1 ):
lowercase_ : Optional[Any] = ans
ans += fiba
lowercase_ : List[Any] = tmp
return ans
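# Worked examples for the helpers above (upstream names gcd / kg_v / fib shown;
# the defs were renamed by the obfuscation):
#
#   gcd(24, 36)  ->  12
#   kg_v(8, 10)  ->  40     # least common multiple
#   fib(5)       ->  5      # 1, 1, 2, 3, 5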
| 425
| 0
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__snake_case = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__snake_case = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = 0.0
for i, j in zip(UpperCamelCase_ , UpperCamelCase_ ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase_ , UpperCamelCase_ ) else 0.0
UpperCamelCase__ :List[Any] = n_correct / len(UpperCamelCase_ )
return {
"accuracy": accuracy,
}
| 711
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase ( A__ ):
"""simple docstring"""
_a = ['pixel_values']
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 255 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = size if size is not None else {'''shortest_edge''': 224}
UpperCamelCase__ :Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
UpperCamelCase__ :str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase__ :Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name='''crop_size''' )
UpperCamelCase__ :Any = do_resize
UpperCamelCase__ :Union[str, Any] = size
UpperCamelCase__ :Any = resample
UpperCamelCase__ :Optional[Any] = do_center_crop
UpperCamelCase__ :List[str] = crop_size
UpperCamelCase__ :Optional[int] = do_rescale
UpperCamelCase__ :Optional[Any] = rescale_factor
UpperCamelCase__ :Any = do_normalize
UpperCamelCase__ :int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ :List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ :Union[str, Any] = do_convert_rgb
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase__ :str = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :int = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ :Optional[Any] = size if size is not None else self.size
UpperCamelCase__ :Optional[int] = get_size_dict(UpperCamelCase_ , param_name='''size''' , default_to_square=UpperCamelCase_ )
UpperCamelCase__ :Dict = resample if resample is not None else self.resample
UpperCamelCase__ :int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ :Any = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ :Any = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' , default_to_square=UpperCamelCase_ )
UpperCamelCase__ :List[str] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ :List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ :Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ :Tuple = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ :str = image_std if image_std is not None else self.image_std
UpperCamelCase__ :Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ :str = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ :Any = [convert_to_rgb(UpperCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ :str = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
UpperCamelCase__ :Optional[Any] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
UpperCamelCase__ :Dict = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
UpperCamelCase__ :Optional[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
UpperCamelCase__ :Tuple = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
UpperCamelCase__ :List[str] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
UpperCamelCase__ :Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
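# Note: the transforms above run in a fixed order for every image:
# convert_to_rgb -> to_numpy_array -> resize -> center_crop -> rescale ->
# normalize -> to_channel_dimension_format, and the result is wrapped in a
# BatchFeature keyed by "pixel_values".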
| 280
| 0
|
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"""{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"""
        return "".join(matched)
    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
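# A quick worked example (a sketch; the classic Winkler test pair):
# "martha" vs "marhta" gives 6 matches and 1 transposition, so
# jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444; with a shared 3-char prefix,
# the score is 0.9444 + 0.1 * 3 * (1 - 0.9444) ≈ 0.9611.
print(jaro_winkler('martha', 'marhta'))  # ≈ 0.9611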
| 53
|
"""simple docstring"""
import random
def rabin_miller(num):
    '''simple docstring'''
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num):
    '''simple docstring'''
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize=1024):
    '''simple docstring'''
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 259
| 0
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :str = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :List[str] = f'''{dataset}-{pair}'''
UpperCamelCase__ :Any = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :List[Any] = '''val''' if split == '''validation''' else split
UpperCamelCase__ :Optional[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :List[str] = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :List[str] = src_path.open('''w+''' )
UpperCamelCase__ :List[Any] = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :int = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
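# Example invocation through fire (the script filename is illustrative):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
# This writes {train,val,test}.source and {train,val,test}.target files
# under a wmt16-ro-en/ directory by default.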
| 280
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits, classical_bits) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 280
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 42
class snake_case_ ( a ):
'''simple docstring'''
def __init__( self, A_, A_, A_, A_, A_, ) -> List[str]:
super().__init__()
self.register_modules(
prior=A_, image_encoder=A_, image_processor=A_, scheduler=A_, renderer=A_, )
def __UpperCAmelCase ( self, A_, A_, A_, A_, A_, A_ ) -> str:
if latents is None:
UpperCAmelCase__ =randn_tensor(A_, generator=A_, device=A_, dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase__ =latents.to(A_ )
UpperCAmelCase__ =latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self, A_=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase__ =torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase__ =[self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_, A_ )
@property
def __UpperCAmelCase ( self ) -> Dict:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder, "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(A_, "_hf_hook" )
and hasattr(module._hf_hook, "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self, A_, A_, A_, A_, ) -> Tuple:
if isinstance(A_, A_ ) and isinstance(image[0], torch.Tensor ):
UpperCAmelCase__ =torch.cat(A_, axis=0 ) if image[0].ndim == 4 else torch.stack(A_, axis=0 )
if not isinstance(A_, torch.Tensor ):
UpperCAmelCase__ =self.image_processor(A_, return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase__ =image.to(dtype=self.image_encoder.dtype, device=A_ )
UpperCAmelCase__ =self.image_encoder(A_ )["last_hidden_state"]
UpperCAmelCase__ =image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase__ =image_embeds.repeat_interleave(A_, dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ =torch.zeros_like(A_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ =torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self, A_, A_ = 1, A_ = 25, A_ = None, A_ = None, A_ = 4.0, A_ = 64, A_ = "pil", A_ = True, ) -> Tuple:
if isinstance(A_, PIL.Image.Image ):
UpperCAmelCase__ =1
elif isinstance(A_, torch.Tensor ):
UpperCAmelCase__ =image.shape[0]
elif isinstance(A_, A_ ) and isinstance(image[0], (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase__ =len(A_ )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(A_ )}""" )
UpperCAmelCase__ =self._execution_device
UpperCAmelCase__ =batch_size * num_images_per_prompt
UpperCAmelCase__ =guidance_scale > 1.0
UpperCAmelCase__ =self._encode_image(A_, A_, A_, A_ )
# prior
self.scheduler.set_timesteps(A_, device=A_ )
UpperCAmelCase__ =self.scheduler.timesteps
UpperCAmelCase__ =self.prior.config.num_embeddings
UpperCAmelCase__ =self.prior.config.embedding_dim
UpperCAmelCase__ =self.prepare_latents(
(batch_size, num_embeddings * embedding_dim), image_embeds.dtype, A_, A_, A_, self.scheduler, )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase__ =latents.reshape(latents.shape[0], A_, A_ )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ =self.scheduler.scale_model_input(A_, A_ )
UpperCAmelCase__ =self.prior(
A_, timestep=A_, proj_embedding=A_, ).predicted_image_embedding
# remove the variance
UpperCAmelCase__ , UpperCAmelCase__ =noise_pred.split(
scaled_model_input.shape[2], dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ =noise_pred.chunk(2 )
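# Classifier-free guidance combines the two halves of the batched forward
# pass: pred = uncond + guidance_scale * (cond - uncond). A scale of 1
# recovers the conditional prediction; larger values push the sample
# harder toward the image conditioning.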
UpperCAmelCase__ =noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase__ =self.scheduler.step(
A_, timestep=A_, sample=A_, ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=A_ )
UpperCAmelCase__ =[]
for i, latent in enumerate(A_ ):
UpperCAmelCase__ =self.renderer.decode(
latent[None, :], A_, size=A_, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
images.append(A_ )
UpperCAmelCase__ =torch.stack(A_ )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
UpperCAmelCase__ =images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase__ =[self.numpy_to_pil(A_ ) for image in images]
# Offload last model to CPU
if hasattr(self, "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=A_ )
| 625
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 'EncodecFeatureExtractor'
__UpperCamelCase = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self, A_, A_ ) -> Optional[int]:
super().__init__(A_, A_ )
UpperCAmelCase__ =self.feature_extractor
UpperCAmelCase__ =False
def __UpperCAmelCase ( self, A_=None, A_=None, A_=True ) -> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=A_, language=A_, no_timestamps=A_ )
def __call__( self, *A_, **A_ ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A_, **A_ )
UpperCAmelCase__ =kwargs.pop("audio", A_ )
UpperCAmelCase__ =kwargs.pop("sampling_rate", A_ )
UpperCAmelCase__ =kwargs.pop("text", A_ )
if len(A_ ) > 0:
UpperCAmelCase__ =args[0]
UpperCAmelCase__ =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
UpperCAmelCase__ =self.tokenizer(A_, **A_ )
if audio is not None:
UpperCAmelCase__ =self.feature_extractor(A_, *A_, sampling_rate=A_, **A_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
UpperCAmelCase__ =audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
UpperCAmelCase__ =audio_inputs["padding_mask"]
return inputs
def __UpperCAmelCase ( self, *A_, **A_ ) -> Dict:
UpperCAmelCase__ =kwargs.pop("audio", A_ )
UpperCAmelCase__ =kwargs.pop("padding_mask", A_ )
if len(A_ ) > 0:
UpperCAmelCase__ =args[0]
UpperCAmelCase__ =args[1:]
if audio_values is not None:
return self._decode_audio(A_, padding_mask=A_ )
else:
return self.tokenizer.batch_decode(*A_, **A_ )
def __UpperCAmelCase ( self, *A_, **A_ ) -> int:
return self.tokenizer.decode(*A_, **A_ )
def __UpperCAmelCase ( self, A_, A_ = None ) -> List[np.ndarray]:
UpperCAmelCase__ =to_numpy(A_ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =audio_values.shape
if padding_mask is None:
return list(A_ )
UpperCAmelCase__ =to_numpy(A_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
UpperCAmelCase__ =seq_len - padding_mask.shape[-1]
UpperCAmelCase__ =1 - self.feature_extractor.padding_value
UpperCAmelCase__ =np.pad(A_, ((0, 0), (0, difference)), "constant", constant_values=A_ )
UpperCAmelCase__ =audio_values.tolist()
for i in range(A_ ):
UpperCAmelCase__ =np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
UpperCAmelCase__ =sliced_audio.reshape(A_, -1 )
return audio_values
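# Note: _decode_audio pads padding_mask with the *non*-padding value so
# freshly generated samples are never masked out, then slices each array
# back to its true length and reshapes it to (channels, samples).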
| 625
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    '''simple docstring'''
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f'''{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'''
        return "".join(matched)
    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 235
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
lowerCamelCase__ = ort.SessionOptions()
lowerCamelCase__ = False
return options
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowerCamelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowerCamelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
lowerCamelCase__ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase__ = """A red cat sitting on a park bench"""
lowerCamelCase__ = np.random.RandomState(0 )
lowerCamelCase__ = pipe(
prompt=a_ , image=a_ , mask_image=a_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=a_ , output_type="""np""" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 235
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : int = {}
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'llama'
_lowercase = ['past_key_values']
def __init__( self : Dict , lowerCamelCase__ : str=32_000 , lowerCamelCase__ : Dict=4_096 , lowerCamelCase__ : Tuple=11_008 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : int=None , lowerCamelCase__ : Union[str, Any]="silu" , lowerCamelCase__ : Any=2_048 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : Dict=1E-6 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Union[str, Any] , ):
a__ : List[str] = vocab_size
a__ : str = max_position_embeddings
a__ : Dict = hidden_size
a__ : List[str] = intermediate_size
a__ : Dict = num_hidden_layers
a__ : Optional[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
a__ : Tuple = num_attention_heads
a__ : str = num_key_value_heads
a__ : Dict = hidden_act
a__ : Optional[int] = initializer_range
a__ : str = rms_norm_eps
a__ : Optional[Any] = pretraining_tp
a__ : int = use_cache
a__ : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , tie_word_embeddings=lowerCamelCase__ , **lowerCamelCase__ , )
def _UpperCamelCase( self : Any ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''' )
a__ : Tuple = self.rope_scaling.get("type" , lowerCamelCase__ )
a__ : Tuple = self.rope_scaling.get("factor" , lowerCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
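# A minimal sketch of a value that passes the validation above (the
# numbers are illustrative):
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
# "dynamic" is the only other accepted type, and factor must be a float > 1.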
| 37
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class UpperCamelCase__ ( __lowerCAmelCase ):
lowerCAmelCase__ : Any = "bert-generation"
def __init__( self : Union[str, Any] , lowerCamelCase : Optional[Any]=5_0_3_5_8 , lowerCamelCase : Optional[Any]=1_0_2_4 , lowerCamelCase : List[Any]=2_4 , lowerCamelCase : Any=1_6 , lowerCamelCase : Optional[int]=4_0_9_6 , lowerCamelCase : Any="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Union[str, Any]=5_1_2 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : str=1e-12 , lowerCamelCase : Optional[int]=0 , lowerCamelCase : List[str]=2 , lowerCamelCase : Union[str, Any]=1 , lowerCamelCase : Any="absolute" , lowerCamelCase : str=True , **lowerCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = initializer_range
a__ = layer_norm_eps
a__ = position_embedding_type
a__ = use_cache
| 489
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = tempfile.mkdtemp()
__snake_case : Any = 5
# Realm tok
__snake_case : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__snake_case : List[str] = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(a_ , exist_ok=a_ )
__snake_case : List[str] = os.path.join(a_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(a_ , exist_ok=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = RealmConfig(num_block_records=self.num_block_records )
return config
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
] , dtype=a_ , )
return block_records
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.get_config()
__snake_case : List[Any] = self.get_dummy_retriever()
__snake_case : Optional[Any] = retriever.tokenizer
__snake_case : List[Any] = np.array([0, 3] , dtype='''long''' )
__snake_case : Dict = tokenizer(['''Test question'''] ).input_ids
__snake_case : List[str] = tokenizer(
['''the fourth'''] , add_special_tokens=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , ).input_ids
__snake_case : str = config.reader_seq_len
__snake_case , __snake_case , __snake_case , __snake_case : List[Any] = retriever(
a_ , a_ , answer_ids=a_ , max_length=a_ , return_tensors='''np''' )
self.assertEqual(len(a_ ) , 2 )
self.assertEqual(len(a_ ) , 2 )
self.assertEqual(len(a_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.get_config()
__snake_case : Optional[int] = self.get_dummy_retriever()
__snake_case : Optional[Any] = retriever.tokenizer
__snake_case : Tuple = np.array([0, 3, 5] , dtype='''long''' )
__snake_case : List[Any] = tokenizer(['''Test question'''] ).input_ids
__snake_case : int = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , ).input_ids
__snake_case : Union[str, Any] = config.reader_seq_len
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = retriever(
a_ , a_ , answer_ids=a_ , max_length=a_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , a_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , a_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
__snake_case : Dict = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
__snake_case : Union[str, Any] = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
__snake_case : Any = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
| 229
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Optional[int] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 229
| 1
|
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase_ ( a : Optional[Any] ):
a__ = fname.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , a__ ).groups()[0]
class _UpperCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self , _a , _a=None , _a=None ):
"""simple docstring"""
a__ = file_names
a__ = image_transform
a__ = label_to_id
def __len__( self ):
"""simple docstring"""
return len(self.file_names )
def __getitem__( self , _a ):
"""simple docstring"""
a__ = self.file_names[idx]
a__ = PIL.Image.open(_a )
a__ = raw_image.convert('RGB' )
if self.image_transform is not None:
a__ = self.image_transform(_a )
a__ = extract_label(_a )
if self.label_to_id is not None:
a__ = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase_ ( a : List[Any] , a : List[Any] ):
if args.with_tracking:
a__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
a__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ = config['lr']
a__ = int(config['num_epochs'] )
a__ = int(config['seed'] )
a__ = int(config['batch_size'] )
a__ = config['image_size']
if not isinstance(a__ , (list, tuple) ):
a__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
a__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a__ = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
a__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a__ = os.path.split(a__ )[-1].split('.' )[0]
accelerator.init_trackers(a__ , a__ )
# Grab all the image filenames
a__ = [os.path.join(args.data_dir , a__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
a__ = [extract_label(a__ ) for fname in file_names]
a__ = list(set(a__ ) )
id_to_label.sort()
a__ = {lbl: i for i, lbl in enumerate(a__ )}
# Set the seed before splitting the data.
np.random.seed(a__ )
torch.manual_seed(a__ )
torch.cuda.manual_seed_all(a__ )
# Split our filenames between train and validation
a__ = np.random.permutation(len(a__ ) )
a__ = int(0.8 * len(a__ ) )
a__ = random_perm[:cut]
a__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a__ = Compose([RandomResizedCrop(a__ , scale=(0.5, 1.0) ), ToTensor()] )
a__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=a__ , label_to_id=a__ )
# For evaluation, we use a deterministic Resize
a__ = Compose([Resize(a__ ), ToTensor()] )
a__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=a__ , label_to_id=a__ )
# Instantiate dataloaders.
a__ = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
a__ = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ = create_model('resnet50d' , pretrained=a__ , num_classes=len(a__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a__ = False
for param in model.get_classifier().parameters():
a__ = True
# We normalize the batches of images to be a bit faster.
a__ = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
a__ = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a__ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
a__ = OneCycleLR(optimizer=a__ , max_lr=a__ , epochs=a__ , steps_per_epoch=len(a__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# We need to keep track of how many total steps we have iterated over
a__ = 0
# We also need to keep track of the starting epoch so files are named properly
a__ = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
a__ = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a__ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a__ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a__ = os.path.splitext(a__ )[0]
if "epoch" in training_difference:
a__ = int(training_difference.replace('epoch_' , '' ) ) + 1
a__ = None
else:
a__ = int(training_difference.replace('step_' , '' ) )
a__ = resume_step // len(a__ )
resume_step -= starting_epoch * len(a__ )
# Now we train the model
for epoch in range(a__ , a__ ):
model.train()
if args.with_tracking:
a__ = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a__ = accelerator.skip_first_batches(a__ , a__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a__ = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
a__ = (batch['image'] - mean) / std
a__ = model(a__ )
a__ = torch.nn.functional.cross_entropy(a__ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(a__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(a__ , a__ ):
a__ = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a__ = os.path.join(args.output_dir , a__ )
accelerator.save_state(a__ )
model.eval()
a__ = 0
a__ = 0
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
a__ = (batch['image'] - mean) / std
with torch.no_grad():
a__ = model(a__ )
a__ = outputs.argmax(dim=-1 )
a__ = accelerator.gather_for_metrics((predictions, batch['label']) )
a__ = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a__ = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(a__ ),
'epoch': epoch,
} , step=a__ , )
if checkpointing_steps == "epoch":
a__ = f'''epoch_{epoch}'''
if args.output_dir is not None:
a__ = os.path.join(args.output_dir , a__ )
accelerator.save_state(a__ )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=a__ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=a__ , default=a__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=a__ , default=a__ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=a__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=a__ , default=a__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=a__ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
a__ = parser.parse_args()
a__ = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(a__ , a__ )
if __name__ == "__main__":
main()
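# Example launch (a sketch; the script filename is an assumption):
#   accelerate launch cv_example.py --data_dir ./images --with_tracking
# The data folder is expected to hold files named like <label>_<id>.jpg,
# matching the regex used by the label-extraction helper above.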
| 394
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str
SCREAMING_SNAKE_CASE__ : str = None
@staticmethod
def __UpperCAmelCase ( ):
"""simple docstring"""
raise NotImplementedError
def __UpperCAmelCase ( self : int , snake_case : Dict , snake_case : int , snake_case : str , **snake_case : Optional[int] ):
"""simple docstring"""
raise NotImplementedError
def __UpperCAmelCase ( self : str , snake_case : Dict ):
"""simple docstring"""
raise NotImplementedError
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] ):
"""simple docstring"""
return F"""`pip install {cls.pip_package or cls.name}`"""
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''optuna'''
@staticmethod
def __UpperCAmelCase ( ):
"""simple docstring"""
return is_optuna_available()
def __UpperCAmelCase ( self : Any , snake_case : Tuple , snake_case : int , snake_case : str , **snake_case : Optional[int] ):
"""simple docstring"""
return run_hp_search_optuna(snake_case , snake_case , snake_case , **snake_case )
def __UpperCAmelCase ( self : Optional[Any] , snake_case : Dict ):
"""simple docstring"""
return default_hp_space_optuna(snake_case )
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''ray'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''\'ray[tune]\''''
@staticmethod
def __UpperCAmelCase ( ):
"""simple docstring"""
return is_ray_available()
def __UpperCAmelCase ( self : List[str] , snake_case : Tuple , snake_case : int , snake_case : str , **snake_case : str ):
"""simple docstring"""
return run_hp_search_ray(snake_case , snake_case , snake_case , **snake_case )
def __UpperCAmelCase ( self : int , snake_case : Optional[Any] ):
"""simple docstring"""
return default_hp_space_ray(snake_case )
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''sigopt'''
@staticmethod
def __UpperCAmelCase ( ):
"""simple docstring"""
return is_sigopt_available()
def __UpperCAmelCase ( self : int , snake_case : Optional[int] , snake_case : int , snake_case : str , **snake_case : Dict ):
"""simple docstring"""
return run_hp_search_sigopt(snake_case , snake_case , snake_case , **snake_case )
def __UpperCAmelCase ( self : int , snake_case : List[Any] ):
"""simple docstring"""
return default_hp_space_sigopt(snake_case )
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''wandb'''
@staticmethod
def __UpperCAmelCase ( ):
"""simple docstring"""
return is_wandb_available()
def __UpperCAmelCase ( self : Dict , snake_case : List[str] , snake_case : int , snake_case : str , **snake_case : Optional[Any] ):
"""simple docstring"""
return run_hp_search_wandb(snake_case , snake_case , snake_case , **snake_case )
def __UpperCAmelCase ( self : Union[str, Any] , snake_case : int ):
"""simple docstring"""
return default_hp_space_wandb(snake_case )
SCREAMING_SNAKE_CASE_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
_snake_case : Optional[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(a__) > 0:
_snake_case : Any = available_backends[0].name
if len(a__) > 1:
logger.info(
F"""{len(a__)} hyperparameter search backends available. Using {name} as the default.""")
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
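# Usage sketch: the helper above (named `default_hp_search_backend`
# upstream) returns the name of the first installed backend, e.g.
# "optuna" when optuna is available, and raises a RuntimeError with
# per-backend pip install hints when none is installed.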
| 517
| 0
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int):
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = ['''vqvae''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , mel=_lowerCAmelCase , vqvae=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler , _lowerCAmelCase ) else 1_000
@torch.no_grad()
def __call__( self , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase=True , ):
'''simple docstring'''
lowerCAmelCase__ :str = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCAmelCase )
lowerCAmelCase__ :Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase__ :Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase__ :Optional[int] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowerCAmelCase , device=self.device , )
lowerCAmelCase__ :Union[str, Any] = noise
lowerCAmelCase__ :Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Dict = self.mel.audio_slice_to_image(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase__ :Tuple = (input_image / 255) * 2 - 1
lowerCAmelCase__ :Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase__ :str = self.vqvae.encode(torch.unsqueeze(_lowerCAmelCase , 0 ) ).latent_dist.sample(
generator=_lowerCAmelCase )[0]
lowerCAmelCase__ :Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase__ :Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase__ :Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase__ :Dict = int(mask_start_secs * pixels_per_second )
lowerCAmelCase__ :Tuple = int(mask_end_secs * pixels_per_second )
lowerCAmelCase__ :str = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _lowerCAmelCase ):
lowerCAmelCase__ :Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["sample"]
else:
lowerCAmelCase__ :Dict = self.unet(_lowerCAmelCase , _lowerCAmelCase )["sample"]
if isinstance(self.scheduler , _lowerCAmelCase ):
lowerCAmelCase__ :Any = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , )["prev_sample"]
else:
lowerCAmelCase__ :List[str] = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , generator=_lowerCAmelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
lowerCAmelCase__ :List[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase__ :Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase__ :Any = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase__ :List[Any] = self.vqvae.decode(_lowerCAmelCase )["sample"]
lowerCAmelCase__ :Dict = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ :Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase__ :Optional[int] = (images * 255).round().astype("uint8" )
lowerCAmelCase__ :Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_lowerCAmelCase , mode="RGB" ).convert("L" ) for _ in images) )
lowerCAmelCase__ :Optional[Any] = [self.mel.image_to_audio(_lowerCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowerCAmelCase ) )
@torch.no_grad()
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , _lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase )
lowerCAmelCase__ :Any = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase__ :Dict = (sample / 255) * 2 - 1
lowerCAmelCase__ :Optional[Any] = torch.Tensor(_lowerCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase__ :List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase__ :Any = self.scheduler.alphas_cumprod[t]
lowerCAmelCase__ :List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase__ :List[str] = 1 - alpha_prod_t
lowerCAmelCase__ :List[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase )["sample"]
lowerCAmelCase__ :int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase__ :List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase__ :Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def snake_case_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = acos(torch.dot(torch.flatten(_lowerCAmelCase ) , torch.flatten(_lowerCAmelCase ) ) / torch.norm(_lowerCAmelCase ) / torch.norm(_lowerCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowerCAmelCase ) + sin(alpha * theta ) * xa / sin(_lowerCAmelCase )
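# The static method above is spherical linear interpolation (slerp): with
# theta = arccos(<x0, x1> / (|x0| * |x1|)), it returns
# sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
# tracing the great-circle arc between the two noise tensors.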
| 111
| 1
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
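    # Worked example: twos_complement(-5) returns "0b1011".
    # bin(-5)[3:] == "101" (3 bits); abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11";
    # prepending "1" and one zero of padding gives "1011", i.e. -5 in 4-bit two's complement.
    print(twos_complement(-5))  # 0b1011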
| 48
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image(img) -> None:
    '''simple docstring'''
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp() -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 48
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 707
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
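# For example, hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv")
# resolves to
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv".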
| 694
| 0
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__snake_case :Optional[Any] =logging.get_logger(__name__)
def lowerCamelCase_ ( ) -> str:
'''simple docstring'''
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
            return False
    except json.JSONDecodeError:
        return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
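# Behavior sketch (hypothetical environment values, not from the original file): the helper
# above only returns True when "partitions" is present in SM_HP_MP_PARAMETERS, MPI is enabled
# in SM_FRAMEWORK_PARAMS, and smdistributed is importable. The JSON parsing step looks like:
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
print("partitions" in smp_options)  # True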
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments ( TrainingArguments ):
    mp_parameters: str = field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
    def __post_init__( self : Optional[Any] ):
super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.' , FutureWarning , )
@cached_property
def __UpperCamelCase ( self : List[str] ) -> "torch.device":
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`' )
        if self.no_cuda:
            device = torch.device('cpu' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
            torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
if device.type == "cuda":
            torch.cuda.set_device(device )
return device
@property
    def world_size( self : List[str] ) -> List[Any]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device( self : Any ) -> Dict:
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation( self : int ) -> Any:
return False
| 106
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :List[str] =logging.get_logger(__name__)
__snake_case :int ={'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig ( PretrainedConfig ):
    model_type = 'openai-gpt'
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self : List[Any] , vocab_size : List[str]=40_478 , n_positions : List[Any]=512 , n_embd : List[Any]=768 , n_layer : Optional[int]=12 , n_head : Dict=12 , afn : List[str]="gelu" , resid_pdrop : str=0.1 , embd_pdrop : Optional[Any]=0.1 , attn_pdrop : Union[str, Any]=0.1 , layer_norm_epsilon : List[str]=1e-5 , initializer_range : List[Any]=0.0_2 , summary_type : str="cls_index" , summary_use_proj : int=True , summary_activation : int=None , summary_proj_to_labels : Optional[int]=True , summary_first_dropout : Tuple=0.1 , **kwargs : List[Any] , ) -> List[str]:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
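# Usage sketch (assumes the class above is transformers' OpenAIGPTConfig): the attribute_map
# defined above lets generic names resolve to model-specific ones.
from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=768)
print(config.hidden_size)  # 768, read through attribute_map -> n_embd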
| 106
| 1
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase_ : int = logging.getLogger()
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser()
parser.add_argument("-f" )
_a = parser.parse_args()
return args.f
class __SCREAMING_SNAKE_CASE (TestCasePlus ):
"""simple docstring"""
    def setUp( self : Tuple ):
        _a = logging.StreamHandler(sys.stdout )
        logger.addHandler(_a )
    def run_and_check( self : Optional[int] , args : Dict ):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(sys , "argv" , args ):
                _a = run_glue_deebert.main()
                for value in _a.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self : Union[str, Any] ):
_a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(_a )
_a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(_a )
_a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(_a )
| 718
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args : Optional[int] , **kwargs : List[Any] ):
            pass
def hashimage ( image : Image ) -> str:
    _a = hashlib.md5(image.tobytes() )
    return _a.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self : int , model : Optional[int] , tokenizer : int , processor : Tuple ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self : int , depth_estimator : Union[str, Any] , examples : str ):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , outputs )
import datasets
_a = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf( self : List[Any] ):
pass
@slow
@require_torch
    def test_large_model_pt( self : List[str] ):
_a = "Intel/dpt-large"
_a = pipeline("depth-estimation" , model=__a )
_a = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
_a = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt( self : Tuple ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 521
| 0
|
'''simple docstring'''
def lowerCAmelCase_ ( number_of_steps : int ):
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
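# Worked example (a sketch, not part of the original file): the function above implements the
# climbing-stairs recurrence f(n) = f(n-1) + f(n-2); "climb_stairs" is a hypothetical alias.
def climb_stairs(number_of_steps: int) -> int:
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current

assert [climb_stairs(n) for n in (1, 2, 3, 4, 5)] == [1, 2, 3, 5, 8]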
| 394
|
'''simple docstring'''
def print_pascal_triangle ( num_rows : int ):
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=' ' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=' ' )
            else:
                print(triangle[row_idx][col_idx] , end='' )
        print()
def generate_pascal_triangle ( num_rows : int ):
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row ( triangle : list[list[int]] , current_row_idx : int ):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element ( triangle : list[list[int]] , current_row : list[int] , current_row_idx : int , current_col_idx : int , ):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized ( num_rows : int ):
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
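# Consistency sketch (not part of the original file): both generators above should agree,
# and each row holds the binomial coefficients C(row, col).
assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4)
print(generate_pascal_triangle(4))  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]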
def benchmark ( ):
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''' , setup='import __main__' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'''{call:38} -- {timing:.4f} seconds''' )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 394
| 1
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
def accuracy( out , labels ):
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
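# Quick check (a sketch, not from the original script): the accuracy helper above counts
# argmax matches between logits and labels.
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2]])
labels = np.array([1, 0])
print(np.sum(np.argmax(logits, axis=1) == labels))  # 2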
def load_rocstories_dataset( dataset_path ):
    with open(dataset_path , encoding='''utf_8''' ) as f:
        f_csv = csv.reader(f )
        output = []
        next(f_csv ) # skip the first line
        for line in tqdm(f_csv ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, conta, contb, mc_label),
        ) in enumerate(dataset ):
            with_conta = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            with_contb = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_conta )] = with_conta
            input_ids[i, 1, : len(with_contb )] = with_contb
            mc_token_ids[i, 0] = len(with_conta ) - 1
            mc_token_ids[i, 1] = len(with_contb ) - 1
            lm_labels[i, 0, : len(with_conta )] = with_conta
            lm_labels[i, 1, : len(with_contb )] = with_contb
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main( ):
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_lowercase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_lowercase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_lowercase , default='''''' )
parser.add_argument('''--seed''' , type=_lowercase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_lowercase , default=3 )
parser.add_argument('''--train_batch_size''' , type=_lowercase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_lowercase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=_lowercase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_lowercase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_lowercase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_lowercase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_lowercase , default=6.25e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_lowercase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_lowercase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_lowercase , default=0.0_1 )
parser.add_argument('''--lm_coef''' , type=_lowercase , default=0.9 )
parser.add_argument('''--n_valid''' , type=_lowercase , default=3_74 )
parser.add_argument('''--server_ip''' , type=_lowercase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_lowercase , default='''''' , help='''Can be used for distant debugging.''' )
_lowerCamelCase : Dict = parser.parse_args()
print(_lowercase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowercase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_lowerCamelCase : int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
_lowerCamelCase : Union[str, Any] = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_lowercase , _lowercase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_lowerCamelCase : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
_lowerCamelCase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_lowercase )
_lowerCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
_lowerCamelCase : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_lowercase ) )
model.to(_lowercase )
# Load and encode the datasets
def tokenize_and_encode(lowercase_ ):
if isinstance(_lowercase , _lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
elif isinstance(_lowercase , _lowercase ):
return obj
return [tokenize_and_encode(_lowercase ) for o in obj]
logger.info('''Encoding dataset...''' )
_lowerCamelCase : Any = load_rocstories_dataset(args.train_dataset )
_lowerCamelCase : List[str] = load_rocstories_dataset(args.eval_dataset )
_lowerCamelCase : Dict = (train_dataset, eval_dataset)
_lowerCamelCase : Optional[int] = tokenize_and_encode(_lowercase )
# Compute the max input length for the Transformer
_lowerCamelCase : Optional[Any] = model.config.n_positions // 2 - 2
_lowerCamelCase : List[Any] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_lowerCamelCase : List[str] = min(_lowercase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_lowerCamelCase : Optional[int] = pre_process_datasets(_lowercase , _lowercase , _lowercase , *_lowercase )
_lowerCamelCase : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
_lowerCamelCase : Any = TensorDataset(*_lowercase )
_lowerCamelCase : Optional[Any] = RandomSampler(_lowercase )
_lowerCamelCase : Union[str, Any] = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.train_batch_size )
_lowerCamelCase : Optional[int] = TensorDataset(*_lowercase )
_lowerCamelCase : List[Any] = SequentialSampler(_lowercase )
_lowerCamelCase : Optional[Any] = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_lowerCamelCase : Tuple = args.max_steps
_lowerCamelCase : List[str] = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
else:
_lowerCamelCase : Dict = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
_lowerCamelCase : Optional[int] = list(model.named_parameters() )
_lowerCamelCase : Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_lowerCamelCase : Tuple = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_lowerCamelCase : Tuple = AdamW(_lowercase , lr=args.learning_rate , eps=args.adam_epsilon )
_lowerCamelCase : Optional[int] = get_linear_schedule_with_warmup(
_lowercase , num_warmup_steps=args.warmup_steps , num_training_steps=_lowercase )
if args.do_train:
_lowerCamelCase : int = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Dict = tqdm(_lowercase , desc='''Training''' )
for step, batch in enumerate(_lowercase ):
_lowerCamelCase : Dict = tuple(t.to(_lowercase ) for t in batch )
_lowerCamelCase : Dict = batch
_lowerCamelCase : Optional[Any] = model(_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
_lowerCamelCase : Any = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_lowerCamelCase : str = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_lowerCamelCase : Dict = 'Training loss: {:.2e} lr: {:.2e}'.format(_lowercase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_lowerCamelCase : List[str] = model.module if hasattr(_lowercase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_lowerCamelCase : Optional[int] = os.path.join(args.output_dir , _lowercase )
_lowerCamelCase : List[Any] = os.path.join(args.output_dir , _lowercase )
torch.save(model_to_save.state_dict() , _lowercase )
model_to_save.config.to_json_file(_lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_lowerCamelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_lowerCamelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_lowercase )
if args.do_eval:
model.eval()
_lowerCamelCase : List[Any] = 0, 0
_lowerCamelCase : List[str] = 0, 0
for batch in tqdm(_lowercase , desc='''Evaluating''' ):
_lowerCamelCase : str = tuple(t.to(_lowercase ) for t in batch )
_lowerCamelCase : Any = batch
with torch.no_grad():
_lowerCamelCase : Tuple = model(
_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
_lowerCamelCase : List[str] = mc_logits.detach().cpu().numpy()
_lowerCamelCase : Any = mc_labels.to('''cpu''' ).numpy()
_lowerCamelCase : int = accuracy(_lowercase , _lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_lowerCamelCase : Tuple = eval_loss / nb_eval_steps
_lowerCamelCase : Optional[int] = eval_accuracy / nb_eval_examples
_lowerCamelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
_lowerCamelCase : List[Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_lowerCamelCase : Optional[Any] = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _lowercase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ['LayoutLMv3FeatureExtractor']
    _import_structure["image_processing_layoutlmv3"] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 613
| 0
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset :
    '''simple docstring'''
    def __init__( self , a=2 , b=3 , length=64 , seed=None) -> Tuple:
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.float32)
    def __len__( self) -> Union[str, Any]:
        return self.length
    def __getitem__( self , i) -> str:
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU ( torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False) -> Any:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True
    def forward( self , x=None) -> Dict:
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel ( torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False) -> int:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True
    def forward( self , x=None) -> Optional[Any]:
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
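# Usage sketch (assumes the restored class names above): one manual SGD step on the toy
# linear data drawn as y = a*x + b + noise.
import torch
from torch.utils.data import DataLoader

dataset = RegressionDataset(length=16, seed=0)
loader = DataLoader(dataset, batch_size=4)
model = RegressionModel(a=0.0, b=0.0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
batch = next(iter(loader))
loss = ((model(batch["x"]) - batch["y"]) ** 2).mean()
loss.backward()
optimizer.step()
print(float(model.a), float(model.b))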
def get_dataloaders ( accelerator , batch_size = 16 ):
    '''simple docstring'''
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
| 167
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset ( ):
    '''simple docstring'''
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class MakeDuplicateClustersTest ( TestCase ):
    '''simple docstring'''
    def test_make_duplicate_clusters( self) -> Optional[Any]:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85)
        self.assertEqual(len(duplicate_clusters[0]) , 2)
    def test_deduplicate_dataset( self) -> Tuple:
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2)
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True)
| 167
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config ( model_name : Union[str, Any] ) -> Union[str, Any]:
    """simple docstring"""
    config = SwinConfig(image_size=1_9_2 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    elif "large" in model_name:
        window_size = 1_2
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
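# Usage sketch (relies on the function above): "base" and "large" in the checkpoint name
# select the corresponding Swin hyper-parameters.
config = get_swin_config("swin-base-simmim-window6-192")
print(config.window_size, config.embed_dim)  # 6 128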
def rename_key ( name : Tuple ) -> List[str]:
    """simple docstring"""
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
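# Example mapping (a sketch, not from the original script): one SimMIM key and its renamed
# HF equivalent.
print(rename_key("encoder.patch_embed.proj.weight"))
# swin.embeddings.patch_embeddings.projection.weight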
def convert_state_dict ( orig_state_dict : Optional[Any] , model : List[Any] ) -> Any:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[
                    :dim
                ]
                orig_state_dict[
                    F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
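# Sketch of the fused-QKV split performed above (hypothetical sizes): a (3*dim, dim) weight
# is sliced into query/key/value blocks of dim rows each.
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)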
def convert_swin_checkpoint ( model_name : List[str] , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : List[Any] ) -> int:
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model and image processor for {model_name} to hub' )
        model.push_to_hub(F'microsoft/{model_name}' )
        image_processor.push_to_hub(F'microsoft/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 701
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
    def __init__( self : Optional[Any] , parent : List[Any] , batch_size : List[Any]=12 , seq_length : int=7 , is_training : Any=True , use_input_mask : Dict=True , use_labels : Optional[Any]=True , vocab_size : Dict=99 , hidden_size : Optional[Any]=32 , projection_dim : Optional[Any]=32 , num_hidden_layers : str=2 , num_attention_heads : List[str]=4 , intermediate_size : List[Any]=37 , dropout : Optional[int]=0.1 , attention_dropout : List[str]=0.1 , max_position_embeddings : Tuple=512 , initializer_range : List[str]=0.02 , bos_token_id : Union[str, Any]=0 , scope : int=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self : Dict ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self : List[str] ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self : Tuple , config : Union[str, Any] , input_ids : List[Any] , input_mask : int ):
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest ( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp( self : Union[str, Any] ):
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self : Tuple ):
        self.config_tester.run_common_tests()
    def test_model( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _lowerCAmelCase ( self : Tuple ):
pass
def _lowerCAmelCase ( self : Dict ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def _lowerCAmelCase ( self : int ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def _lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@slow
    def test_model_from_pretrained( self : Optional[int] ):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self : int , allow_missing_keys : int=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 583
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition ( TaskTemplate ):
    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self : Optional[int] , features : Tuple ):
        if self.audio_column not in features:
            raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema['''audio'''] = features[self.audio_column]
        task_template.__dict__['''input_schema'''] = input_schema
        return task_template
    @property
    def column_mapping( self : Dict ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
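# Usage sketch (assumes the template above is datasets' AutomaticSpeechRecognition): the
# column_mapping property maps the configured columns onto the canonical schema.
template = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
print(template.column_mapping)  # {'file': 'audio', 'text': 'transcription'}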
| 583
| 1
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=input_ids)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output , past = model(input_ids , attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device)] , dim=1 , )
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past , attention_mask=attn_mask)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True)
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization( self , config , *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
    def create_and_check_biogpt_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def test_biogpt_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device),
        )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 675
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : str = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _A ( PretrainedConfig):
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class _A ( OnnxConfig):
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
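# Minimal usage sketch (illustrative values, not from the original file): the attribute_map
# above lets `num_hidden_layers`, `num_attention_heads` and `hidden_size` alias n_layers/n_heads/dim.
# config = _A(n_layers=6, n_heads=12, dim=768)
# assert config.num_hidden_layers == 6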
| 511
|
def hamming( n_element ):
    """simple docstring"""
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
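# Example (assumed behaviour of the fixed function): the first five Hamming numbers are
# hamming(5) == [1, 2, 3, 4, 5]; the series continues 6, 8, 9, 10, 12, ...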
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(F'The list with nth numbers is: {hamming_numbers}')
print('-----------------------------------------------------')
| 511
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_UpperCamelCase = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
_UpperCamelCase = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class __lowercase (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('''add_bos_token''' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) ->BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) ->BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) ->Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ) ->List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
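# Usage sketch (assumes the public "gpt2" checkpoint is available; `__lowercase` is the fast
# tokenizer class defined above):
# tok = __lowercase.from_pretrained("gpt2")
# tok("hello world")["input_ids"]  # -> BPE token ids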
| 583
|
def _lowercase ( a , b ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
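# Example (standard abbreviation check): "daBcd" can be turned into "ABC" by capitalising
# 'a' and 'c' and deleting the remaining lowercase letters, so _lowercase("daBcd", "ABC") is True.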
if __name__ == "__main__":
import doctest
doctest.testmod()
| 583
| 1
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 198
|
"""simple docstring"""
def solution( pence = 200 ):
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
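# The double loop is the classic unbounded-knapsack recurrence: iterating coins in the outer
# loop counts each multiset of coins exactly once, e.g. solution(5) == 4
# (1+1+1+1+1, 1+1+1+2, 1+2+2, 5).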
if __name__ == "__main__":
assert solution(200) == 7_3682
| 425
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ) -> dict:
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ) -> None:
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ) -> None:
        '''simple docstring'''
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ) -> None:
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ) -> None:
        '''simple docstring'''
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ) -> None:
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ) -> None:
        '''simple docstring'''
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
    def test_variance_learned_range( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1e-5
    def test_full_loop( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1e-2
        assert abs(result_mean.item() - 0.3284743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1e-2
        assert abs(result_mean.item() - 0.3362038 ) < 1e-3
    def test_trained_betas( self ) -> None:
        '''simple docstring'''
        pass
    def test_add_noise_device( self ) -> None:
        '''simple docstring'''
        pass
| 473
|
import os
from datetime import datetime as dt
from github import Github
A : Union[str, Any] = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
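# Note: running this maintenance script assumes a GITHUB_TOKEN environment variable whose
# token can comment on and edit issues in huggingface/diffusers (the exact permission scope
# required is an assumption, not stated in the original file).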
if __name__ == "__main__":
main()
| 473
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class __UpperCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """audio-spectrogram-transformer"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
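# Minimal sketch (defaults mirror the signature above; not from the original file):
# cfg = __UpperCAmelCase(num_mel_bins=64)  # override a single field
# cfg.model_type  # -> "audio-spectrogram-transformer"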
| 366
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __UpperCAmelCase ( ImageProcessingSavingTestMixin ,unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict( self ) -> dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , 'do_resize'))
        self.assertTrue(hasattr(image_processing , 'size'))
        self.assertTrue(hasattr(image_processing , 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing , 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing , 'do_pad'))
        self.assertTrue(hasattr(image_processing , 'do_normalize'))
        self.assertTrue(hasattr(image_processing , 'image_mean'))
        self.assertTrue(hasattr(image_processing , 'image_std'))
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'height': 18, 'width': 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84))
        self.assertEqual(image_processor.size , {'height': 84, 'width': 42})
    def test_batch_feature( self ) -> None:
        pass
@is_flaky()
    def test_call_pil( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
@is_flaky()
    def test_call_numpy( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
@is_flaky()
    def test_call_pytorch( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
| 366
| 1
|
from collections.abc import Generator
from math import sin
def to_little_endian( string_32 ):
    """simple docstring"""
    if len(string_32 ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex( i ):
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess( message ):
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words( bit_string ):
    """simple docstring"""
    if len(bit_string ) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_32( i ):
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_32( a , b ):
    """simple docstring"""
    return (a + b) % 2**32
def left_rotate_32( i , shift ):
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me( message ):
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x67452301
    ba = 0xefcdab89
    ca = 0x98badcfe
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b , left_rotate_32(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_32(aa , a )
        ba = sum_32(ba , b )
        ca = sum_32(ca , c )
        da = sum_32(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
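# Sanity check (standard MD5 test vector; assumes the function is called on bytes):
# md5_me(b"") should produce b"d41d8cd98f00b204e9800998ecf8427e"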
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
def A_ ( string_a , string_b ) ->int:
    """simple docstring"""
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
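# Example (illustrative): A_("karolin", "kathrin") == 3 -- the number of positions at which
# two equal-length strings differ (the Hamming distance).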
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , )->None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 16, 32],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _a ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """simple docstring"""
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self )->None:
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self )->None:
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_inputs_embeds( self )->None:
        pass
    def test_model_common_attributes( self )->None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self )->None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self )->None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self )->None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self )->None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
    def test_model_from_pretrained( self )->None:
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
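# prepare_img() loads the standard COCO fixture image used by the integration tests below;
# the path assumes the transformers test fixtures are present in the working directory.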
@require_torch
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self )->None:
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self )->None:
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
| 602
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__A : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings ( ModelMixin , ConfigMixin):
"""simple docstring"""
@register_to_config
    def __init__( self , learnable: bool , hidden_size: Optional[int] = None , length: Optional[int] = None )->None:
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class _a ( DiffusionPipeline):
    """simple docstring"""
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self , vqvae: VQModel , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , transformer: Transformer2DModel , scheduler: VQDiffusionScheduler , learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings , )->None:
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [''''''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
@torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , num_inference_steps: int = 100 , guidance_scale: float = 5.0 , truncation_rate: float = 1.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , )->Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(prompt )}' )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps )}.' )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0: torch.FloatTensor , truncation_rate: float )->torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
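# truncate() implements VQ-Diffusion-style truncation sampling: per latent pixel it keeps only
# the highest-probability codebook entries whose cumulative probability stays below
# `truncation_rate`, setting everything else to log(0) before the scheduler step.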
| 602
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
        '''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoForCausalLM''',
        '''GPTNeoForQuestionAnswering''',
        '''GPTNeoForSequenceClassification''',
        '''GPTNeoForTokenClassification''',
        '''GPTNeoModel''',
        '''GPTNeoPreTrainedModel''',
        '''load_tf_weights_in_gpt_neo''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
        '''FlaxGPTNeoForCausalLM''',
        '''FlaxGPTNeoModel''',
        '''FlaxGPTNeoPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.txt"""}
__snake_case = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__snake_case = {
"""openbmb/cpm-ant-10b""": 10_24,
}
def _lowercase ( UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = collections.OrderedDict()
with open(UpperCamelCase_ , 'r' , encoding='utf-8' ) as reader:
SCREAMING_SNAKE_CASE__ = reader.readlines()
for index, token in enumerate(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = token.rstrip('\n' )
SCREAMING_SNAKE_CASE__ = index
return vocab
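# Note: load_vocab maps each line of the vocab file to its 0-based line index;
# e.g. a file containing "foo\nbar\n" yields OrderedDict([("foo", 0), ("bar", 1)]).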
class lowercase__ ( _UpperCAmelCase ):
def __init__( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple="<unk>" , UpperCAmelCase_ : str=200 ):
SCREAMING_SNAKE_CASE__ = vocab
SCREAMING_SNAKE_CASE__ = unk_token
SCREAMING_SNAKE_CASE__ = max_input_chars_per_word
def A_ ( self : str , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = []
while start < len(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = None
while start < end:
SCREAMING_SNAKE_CASE__ = ''.join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE__ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = end
return sub_tokens
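# Note: tokenize above is a greedy longest-match-first scan: at each start
# position it tries the longest remaining substring, shrinking the window until
# a vocabulary hit is found; on a miss it emits unk_token and advances one
# character. E.g. with an illustrative vocab {"un", "happy"} (not the real one),
# "unhappy" -> ["un", "happy"], while "xhappy" -> [unk_token, "happy"].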
class lowercase__ ( _UpperCAmelCase ):
A__ : int =VOCAB_FILES_NAMES
A__ : int =PRETRAINED_VOCAB_FILES_MAP
A__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : str =["""input_ids""", """attention_mask"""]
A__ : str =False
def __init__( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]="<d>" , UpperCAmelCase_ : Optional[int]="</d>" , UpperCAmelCase_ : str="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Union[str, Any]="<unk>" , UpperCAmelCase_ : int="</n>" , UpperCAmelCase_ : Optional[Any]="</_>" , UpperCAmelCase_ : Union[str, Any]="left" , **UpperCAmelCase_ : Optional[Any] , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=UpperCAmelCase_ , eod_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , line_token=UpperCAmelCase_ , space_token=UpperCAmelCase_ , padding_side=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = bod_token
SCREAMING_SNAKE_CASE__ = eod_token
SCREAMING_SNAKE_CASE__ = load_vocab(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.encoder[space_token]
SCREAMING_SNAKE_CASE__ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A_ ( self : Tuple ):
return self.encoder[self.bod_token]
@property
def A_ ( self : str ):
return self.encoder[self.eod_token]
@property
def A_ ( self : List[str] ):
return self.encoder["\n"]
@property
def A_ ( self : str ):
return len(self.encoder )
def A_ ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def A_ ( self : Any , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE__ = []
for x in jieba.cut(UpperCAmelCase_ , cut_all=UpperCAmelCase_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCAmelCase_ ) )
return output_tokens
def A_ ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE__ = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE__ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCAmelCase_ , **UpperCAmelCase_ )
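# Note: _decode above drops negative ids and then strips pad/eos/bos before
# delegating to the parent decoder; negative ids are assumed here to be
# placeholder positions (e.g. prompt slots) that have no vocabulary entry.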
def A_ ( self : Any , UpperCAmelCase_ : Union[str, Any] ):
return token in self.encoder
def A_ ( self : Dict , UpperCAmelCase_ : List[str] ):
return "".join(UpperCAmelCase_ )
def A_ ( self : List[str] , UpperCAmelCase_ : Dict ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def A_ ( self : List[Any] , UpperCAmelCase_ : Any ):
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def A_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if os.path.isdir(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
SCREAMING_SNAKE_CASE__ = (filename_prefix + '-' if filename_prefix else '') + save_directory
SCREAMING_SNAKE_CASE__ = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE__ = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE__ = self.encoder['\n']
del self.encoder["\n"]
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE__ = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def A_ ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A_ ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ ))
return [1] + ([0] * len(UpperCAmelCase_ ))
| 472
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__snake_case = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = XLNetConfig.from_json_file(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
SCREAMING_SNAKE_CASE__ = finetuning_task
SCREAMING_SNAKE_CASE__ = GLUE_TASKS_NUM_LABELS[finetuning_task]
SCREAMING_SNAKE_CASE__ = XLNetForSequenceClassification(UpperCamelCase_ )
elif "squad" in finetuning_task:
SCREAMING_SNAKE_CASE__ = finetuning_task
SCREAMING_SNAKE_CASE__ = XLNetForQuestionAnswering(UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = XLNetLMHeadModel(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save pytorch-model
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
print(F'Save PyTorch model to {os.path.abspath(UpperCamelCase_ )}' )
torch.save(model.state_dict() , UpperCamelCase_ )
print(F'Save configuration file to {os.path.abspath(UpperCamelCase_ )}' )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
__snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 472
| 1
|
"""simple docstring"""
def __a ( ) ->Union[str, Any]:
a__: List[str] = 0
for i in range(1 , 1001 ):
total += i**i
return str(total )[-10:]
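# Note: only the last ten digits matter, so an equivalent sketch can stay in
# modular arithmetic and avoid the huge intermediate integers:
#   sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
# (this returns the same ten digits as an integer rather than a string slice).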
if __name__ == "__main__":
print(solution())
| 714
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 217
| 0
|
'''simple docstring'''
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _A ( lowercase__ , lowercase__ , lowercase__ ):
lowercase__ = OmegaConf.load(lowercase__ )
lowercase__ = torch.load(lowercase__ , map_location="""cpu""" )["""model"""]
lowercase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowercase__ = {}
lowercase__ = """first_stage_model."""
for key in keys:
if key.startswith(lowercase__ ):
lowercase__ = state_dict[key]
# extract state_dict for UNetLDM
lowercase__ = {}
lowercase__ = """model.diffusion_model."""
for key in keys:
if key.startswith(lowercase__ ):
lowercase__ = state_dict[key]
lowercase__ = config.model.params.first_stage_config.params
lowercase__ = config.model.params.unet_config.params
lowercase__ = VQModel(**lowercase__ ).eval()
vqvae.load_state_dict(lowercase__ )
lowercase__ = UNetLDMModel(**lowercase__ ).eval()
unet.load_state_dict(lowercase__ )
lowercase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=lowercase__ , )
lowercase__ = LDMPipeline(lowercase__ , lowercase__ , lowercase__ )
pipeline.save_pretrained(lowercase__ )
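# Note (assumption about the upstream conversion script): the keys collected in
# the two loops are re-keyed with their prefix stripped before load_state_dict,
# e.g. vqvae_state_dict[key[len("first_stage_model."):]] = state_dict[key],
# so that VQModel and UNetLDMModel receive keys relative to their own modules.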
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
__A = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 325
|
'''simple docstring'''
def _A ( lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
lowercase__ , lowercase__ = 1, 1
for _ in range(number_of_steps - 1 ):
lowercase__ , lowercase__ = current + previous, current
return current
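# Note: this is the climbing-stairs recurrence (steps of size 1 or 2), i.e. a
# Fibonacci-style iteration with f(1)=1, f(2)=2, f(n)=f(n-1)+f(n-2); for
# example number_of_steps=4 gives 5 distinct ways.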
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 1
|
import os
import string
import sys
__snake_case : List[str] =1 << 8
__snake_case : Optional[int] ={
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 2_7,
'up': 6_5 + ARROW_KEY_FLAG,
'down': 6_6 + ARROW_KEY_FLAG,
'right': 6_7 + ARROW_KEY_FLAG,
'left': 6_8 + ARROW_KEY_FLAG,
'mod_int': 9_1,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 5_0,
'delete': 5_1,
'pg_up': 5_3,
'pg_down': 5_4,
}
__snake_case : int =KEYMAP['up']
__snake_case : Tuple =KEYMAP['left']
if sys.platform == "win32":
__snake_case : List[str] =[]
__snake_case : str ={
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__snake_case : Optional[Any] =ord(str(i))
def lowerCAmelCase__ ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase__ : List[Any] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowerCamelCase_) == 0:
# Read the keystroke
lowerCAmelCase__ : Tuple = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ : Union[str, Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ : Optional[int] = chr(WIN_KEYMAP[cha])
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
WIN_CH_BUFFER.append(lowerCamelCase_)
if ord(lowerCamelCase_) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126))
lowerCAmelCase__ : Tuple = chr(KEYMAP['''esc'''])
except KeyError:
lowerCAmelCase__ : Dict = cha[1]
else:
lowerCAmelCase__ : Dict = ch.decode(lowerCamelCase_)
else:
lowerCAmelCase__ : List[Any] = WIN_CH_BUFFER.pop(0)
elif os.name == "posix":
import termios
import tty
lowerCAmelCase__ : Union[str, Any] = sys.stdin.fileno()
lowerCAmelCase__ : Any = termios.tcgetattr(lowerCamelCase_)
try:
tty.setraw(lowerCamelCase_)
lowerCAmelCase__ : Optional[Any] = sys.stdin.read(1)
finally:
termios.tcsetattr(lowerCamelCase_ ,termios.TCSADRAIN ,lowerCamelCase_)
return ch
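# Note: on POSIX the terminal is put into raw mode so a single keystroke can be
# read without waiting for Enter; the try/finally restores the saved termios
# settings (TCSADRAIN waits for pending output to drain first) even if the
# read raises.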
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : int = get_raw_chars()
if ord(lowerCamelCase_) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowerCamelCase_) == KEYMAP["esc"]:
lowerCAmelCase__ : Union[str, Any] = get_raw_chars()
if ord(lowerCamelCase_) == KEYMAP["mod_int"]:
lowerCAmelCase__ : str = get_raw_chars()
if ord(lowerCamelCase_) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowerCamelCase_) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowerCamelCase_) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 717
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =AltDiffusionPipeline
snake_case_ =TEXT_TO_IMAGE_PARAMS
snake_case_ =TEXT_TO_IMAGE_BATCH_PARAMS
snake_case_ =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ =TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,)
lowerCAmelCase__ : List[str] = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=__lowerCamelCase ,set_alpha_to_one=__lowerCamelCase ,)
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_02 ,)
lowerCAmelCase__ : int = CLIPTextModel(__lowerCamelCase )
lowerCAmelCase__ : str = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ : Union[str, Any] = 77
lowerCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=0 ) -> Optional[Any]:
"""simple docstring"""
if str(__lowerCamelCase ).startswith('''mps''' ):
lowerCAmelCase__ : Tuple = torch.manual_seed(__lowerCamelCase )
else:
lowerCAmelCase__ : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ : str = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=50_02 ,)
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ : Any = RobertaSeriesModelWithTransformation(__lowerCamelCase )
lowerCAmelCase__ : List[str] = text_encoder
lowerCAmelCase__ : List[Any] = AltDiffusionPipeline(**__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(__lowerCamelCase )
lowerCAmelCase__ : str = '''A photo of an astronaut'''
lowerCAmelCase__ : Any = alt_pipe(**__lowerCamelCase )
lowerCAmelCase__ : int = output.images
lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[Any] = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : str = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
torch.manual_seed(0 )
lowerCAmelCase__ : int = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=50_02 ,)
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ : Tuple = RobertaSeriesModelWithTransformation(__lowerCamelCase )
lowerCAmelCase__ : List[str] = text_encoder
lowerCAmelCase__ : List[Any] = AltDiffusionPipeline(**__lowerCamelCase )
lowerCAmelCase__ : str = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = alt_pipe(**__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = output.images
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' ,safety_checker=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : Any = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : str = alt_pipe([prompt] ,generator=__lowerCamelCase ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type='''np''' )
lowerCAmelCase__ : str = output.images
lowerCAmelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ : Dict = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' ,subfolder='''scheduler''' )
lowerCAmelCase__ : List[str] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' ,scheduler=__lowerCamelCase ,safety_checker=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = alt_pipe([prompt] ,generator=__lowerCamelCase ,num_inference_steps=2 ,output_type='''numpy''' )
lowerCAmelCase__ : List[str] = output.images
lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ : List[Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 90
| 0
|
class A :
def __init__( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ ={}
def lowerCAmelCase__ ( self: str ) -> None:
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , " -> " , " -> ".join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: int , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
UpperCAmelCase_ =[to_vertex]
def lowerCAmelCase__ ( self: Optional[Any] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =[False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(i , visited )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: list ) -> None:
'''simple docstring'''
UpperCAmelCase_ =True
print(_lowerCAmelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(i , visited )
if __name__ == "__main__":
__lowercase : Dict =Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 54
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def __lt__( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self[-1] < __SCREAMING_SNAKE_CASE[-1]
def __eq__( self , __SCREAMING_SNAKE_CASE ) ->Dict:
return self[-1] == __SCREAMING_SNAKE_CASE[-1]
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> list:
lowerCAmelCase = []
# sort into stacks
for element in collection:
lowerCAmelCase = Stack([element] )
lowerCAmelCase = bisect_left(snake_case__ , snake_case__ )
if i != len(snake_case__ ):
stacks[i].append(snake_case__ )
else:
stacks.append(snake_case__ )
# use a heap-based merge to merge stack efficiently
lowerCAmelCase = merge(*(reversed(snake_case__ ) for stack in stacks) )
return collection
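# Note: patience sort deals each element onto the leftmost pile whose top is
# >= it (bisect_left works because Stack's __lt__/__eq__ compare pile tops),
# which leaves every pile in non-increasing order; reversing the piles and
# k-way merging them with heapq.merge then yields the sorted sequence in
# O(n log n). Upstream the result is written back via collection[:] = merge(...).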
if __name__ == "__main__":
lowercase__ : int = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 312
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__snake_case : Optional[int] ='pt'
elif is_tf_available():
__snake_case : Optional[Any] ='tf'
else:
__snake_case : Any ='jax'
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =ByTaTokenizer
snake_case_ =False
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
super().setUp()
lowerCAmelCase__ : Optional[Any] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> ByTaTokenizer:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=False ,__lowerCamelCase=20 ,__lowerCamelCase=5 ) -> Tuple[str, list]:
"""simple docstring"""
lowerCAmelCase__ : Dict = []
for i in range(len(__lowerCamelCase ) ):
try:
lowerCAmelCase__ : List[str] = tokenizer.decode([i] ,clean_up_tokenization_spaces=__lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ : Union[str, Any] = list(filter(lambda t : re.match(R'''^[ a-zA-Z]+$''' ,t[1] ) ,__lowerCamelCase ) )
lowerCAmelCase__ : int = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=__lowerCamelCase ) ,__lowerCamelCase ) )
if max_length is not None and len(__lowerCamelCase ) > max_length:
lowerCAmelCase__ : Dict = toks[:max_length]
if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
while len(__lowerCamelCase ) < min_length:
lowerCAmelCase__ : str = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ : List[Any] = tokenizer.decode(__lowerCamelCase ,clean_up_tokenization_spaces=__lowerCamelCase )
if " " not in output_txt and len(__lowerCamelCase ) > 1:
lowerCAmelCase__ : Tuple = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=__lowerCamelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=__lowerCamelCase )
)
if with_prefix_space:
lowerCAmelCase__ : int = ''' ''' + output_txt
lowerCAmelCase__ : Tuple = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
return output_txt, output_ids
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.ta_base_tokenizer
lowerCAmelCase__ : Optional[int] = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
lowerCAmelCase__ : str = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] ,batch_without_eos_added['''input_ids'''] )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.ta_base_tokenizer
lowerCAmelCase__ : str = '''Unicode €.'''
lowerCAmelCase__ : Any = tokenizer(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['''input_ids'''] ,__lowerCamelCase )
# decoding
lowerCAmelCase__ : Tuple = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,'''Unicode €.</s>''' )
lowerCAmelCase__ : str = tokenizer('''e è é ê ë''' )
lowerCAmelCase__ : Optional[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['''input_ids'''] ,__lowerCamelCase )
# decoding
lowerCAmelCase__ : str = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,'''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) ,'''e è é ê ë</s>''' )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.ta_base_tokenizer
lowerCAmelCase__ : List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase__ : Tuple = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
lowerCAmelCase__ : str = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase ,__lowerCamelCase )
if FRAMEWORK != "jax":
lowerCAmelCase__ : Any = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Any = self.ta_base_tokenizer
lowerCAmelCase__ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase__ : Optional[int] = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,return_tensors=__lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' ,__lowerCamelCase )
self.assertIn('''attention_mask''' ,__lowerCamelCase )
self.assertNotIn('''decoder_input_ids''' ,__lowerCamelCase )
self.assertNotIn('''decoder_attention_mask''' ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.ta_base_tokenizer
lowerCAmelCase__ : Tuple = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase__ : Optional[Any] = tokenizer(
text_target=__lowerCamelCase ,max_length=32 ,padding='''max_length''' ,truncation=__lowerCamelCase ,return_tensors=__lowerCamelCase )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.ta_base_tokenizer
lowerCAmelCase__ : int = ['''A long paragraph for summarization. </s>''']
lowerCAmelCase__ : Union[str, Any] = ['''Summary of the text. </s>''']
# fmt: off
lowerCAmelCase__ : Tuple = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
lowerCAmelCase__ : List[Any] = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
lowerCAmelCase__ : List[Any] = tokenizer(__lowerCamelCase ,text_target=__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,batch['''input_ids'''][0] )
self.assertEqual(__lowerCamelCase ,batch['''labels'''][0] )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
lowerCAmelCase__ : Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCAmelCase__ : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = after_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
lowerCAmelCase__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : int = tempfile.mkdtemp()
lowerCAmelCase__ : int = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase__ : Dict = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.__class__.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : str = after_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertIn('''new_additional_special_token''' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
lowerCAmelCase__ : int = tokenizer.__class__.from_pretrained(__lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ : int = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ : Any = json.load(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = [f"""<extra_id_{i}>""" for i in range(1_25 )]
lowerCAmelCase__ : Tuple = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase__ : Dict = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowerCamelCase ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(__lowerCamelCase ,__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(__lowerCamelCase ,__lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase ,)
self.assertIn(
'''an_additional_special_token''' ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ : Dict = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' ,lstrip=__lowerCamelCase )]
lowerCAmelCase__ : List[str] = tokenizer_class.from_pretrained(
__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,)
self.assertIn('''a_new_additional_special_token''' ,tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) ,)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Dict = tokenizer_class.from_pretrained(__lowerCamelCase )
self.assertTrue(tokenizer.decode([2_55] ) == '''''' )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : str = self.get_tokenizers(fast=__lowerCamelCase ,do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ : List[str] = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowerCAmelCase__ : List[str] = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ : Optional[int] = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : Dict = tokenizer.convert_ids_to_tokens(
__lowerCamelCase ,skip_special_tokens=__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase ,attr + '''_id''' ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,attr + '''_id''' ) ,__lowerCamelCase )
setattr(__lowerCamelCase ,attr + '''_id''' ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,attr + '''_id''' ) ,__lowerCamelCase )
setattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[] )
setattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ,[token_id_to_test_setters] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens''' ) ,[token_to_test_setters] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[token_id_to_test_setters] )
| 90
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Union[str, Any] =logging.get_logger(__name__)
__snake_case : Dict ={'vocab_file': 'sentencepiece.model'}
__snake_case : Optional[Any] ={
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
__snake_case : int ={
'google/rembert': 2_5_6,
}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =VOCAB_FILES_NAMES
snake_case_ =PRETRAINED_VOCAB_FILES_MAP
snake_case_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=False ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase="[CLS]" ,__lowerCamelCase="[SEP]" ,__lowerCamelCase="[UNK]" ,__lowerCamelCase="[SEP]" ,__lowerCamelCase="[PAD]" ,__lowerCamelCase="[CLS]" ,__lowerCamelCase="[MASK]" ,**__lowerCamelCase ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
do_lower_case=__lowerCamelCase ,remove_space=__lowerCamelCase ,keep_accents=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,**__lowerCamelCase ,)
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : Optional[Any] = remove_space
lowerCAmelCase__ : Any = keep_accents
lowerCAmelCase__ : Dict = vocab_file
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor()
self.sp_model.Load(__lowerCamelCase )
@property
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.__dict__.copy()
lowerCAmelCase__ : Dict = None
return state
def __setstate__(self ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = d
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : str = self.sp_model.EncodeAsPieces(__lowerCamelCase )
return pieces
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
return self.sp_model.PieceToId(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.IdToPiece(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.sp_model.decode_pieces(__lowerCamelCase )
return out_string
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : str = [self.sep_token_id]
lowerCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ,__lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
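# Note: the returned mask marks special tokens with 1 and sequence tokens with
# 0, mirroring the layout [CLS] tokens_a [SEP] (tokens_b [SEP]); a sequence
# pair therefore yields [1, 0...0, 1, 0...0, 1].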
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowerCamelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__lowerCamelCase ) )
return
lowerCAmelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file ,__lowerCamelCase )
return (out_vocab_file,)
| 90
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[Any] = parent
__A : List[Any] = 13
__A : str = 7
__A : Optional[Any] = True
__A : List[str] = True
__A : Union[str, Any] = True
__A : Dict = True
__A : int = 99
__A : Optional[Any] = 32
__A : Tuple = 2
__A : str = 4
__A : str = 37
__A : List[str] = 'gelu'
__A : str = 0.1
__A : List[Any] = 0.1
__A : Optional[Any] = 512
__A : Any = 16
__A : Optional[int] = 2
__A : Dict = 0.02
__A : Union[str, Any] = 3
__A : List[Any] = 4
__A : Dict = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : int = None
if self.use_token_type_ids:
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : str = None
__A : str = None
__A : Any = None
if self.use_labels:
__A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : int = ids_tensor([self.batch_size] , self.num_choices)
__A : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = TFRoFormerModel(config=_UpperCAmelCase)
__A : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : List[Any] = [input_ids, input_mask]
__A : str = model(_UpperCAmelCase)
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = True
__A : List[Any] = TFRoFormerForCausalLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Tuple = model(_UpperCAmelCase)['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape) , [self.batch_size, self.seq_length, self.vocab_size])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = TFRoFormerForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = self.num_labels
__A : Tuple = TFRoFormerForSequenceClassification(config=_UpperCAmelCase)
__A : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = self.num_choices
__A : List[str] = TFRoFormerForMultipleChoice(config=_UpperCAmelCase)
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Dict = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Any = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : List[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = self.num_labels
__A : List[Any] = TFRoFormerForTokenClassification(config=_UpperCAmelCase)
__A : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFRoFormerForQuestionAnswering(config=_UpperCAmelCase)
__A : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : List[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.prepare_config_and_inputs()
        ((__A) , (__A) , (__A) , (__A) , (__A) , (__A) , (__A)) : Optional[Any] = config_and_inputs
__A : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
    def SCREAMING_SNAKE_CASE ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = TFRoFormerModelTester(self)
__A : Tuple = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50_000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape)
print(output[:, :3, :3])
# TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = 1E-4
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6)
        emb1 = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = 1E-4
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32) , shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32) , shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer)
        expected_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
])
        expected_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
])
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance)
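# Illustrative sketch (not part of the original test file): the rotate-half form of
# rotary position embeddings that the assertions above check, written in plain NumPy.
# The helper name and shapes are hypothetical; the real logic lives in
# TFRoFormerSelfAttention.apply_rotary_position_embeddings.
import numpy as np


def apply_rotary_sketch(x, sin, cos):
    # x: (seq_len, head_dim); sin/cos: (seq_len, head_dim // 2) from the sinusoidal table.
    sin_pos = np.repeat(sin, 2, axis=-1)  # duplicate each value over the (even, odd) pair
    cos_pos = np.repeat(cos, 2, axis=-1)
    rotated = np.empty_like(x)
    rotated[..., 0::2] = -x[..., 1::2]  # rotate each (x0, x1) pair to (-x1, x0)
    rotated[..., 1::2] = x[..., 0::2]
    return x * cos_pos + rotated * sin_pos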
| 8
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
A__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ ,R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' ,)
class __snake_case ( UpperCamelCase_ ):
def UpperCAmelCase__ ( self : Optional[Any] , A_ : GenericTensor):
if self.framework == "tf":
lowerCAmelCase_ : Dict = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
lowerCAmelCase_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_)
else:
raise ValueError('''Unsupported framework''')
return masked_index
def UpperCAmelCase__ ( self : Tuple , A_ : GenericTensor):
lowerCAmelCase_ : List[str] = self.get_masked_index(A_)
lowerCAmelCase_ : Union[str, Any] = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def UpperCAmelCase__ ( self : str , A_ : GenericTensor):
if isinstance(A_ , A_):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A_)
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : Optional[int]=None , **A_ : List[str]):
if return_tensors is None:
lowerCAmelCase_ : Optional[int] = self.framework
lowerCAmelCase_ : Optional[Any] = self.tokenizer(A_ , return_tensors=A_)
self.ensure_exactly_one_mask_token(A_)
return model_inputs
def UpperCAmelCase__ ( self : List[str] , A_ : str):
lowerCAmelCase_ : Union[str, Any] = self.model(**A_)
lowerCAmelCase_ : List[str] = model_inputs['''input_ids''']
return model_outputs
def UpperCAmelCase__ ( self : str , A_ : str , A_ : str=5 , A_ : int=None):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase_ : int = target_ids.shape[0]
lowerCAmelCase_ : List[Any] = model_outputs['''input_ids'''][0]
lowerCAmelCase_ : int = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase_ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
lowerCAmelCase_ : Optional[Any] = outputs.numpy()
lowerCAmelCase_ : List[str] = outputs[0, masked_index, :]
lowerCAmelCase_ : List[Any] = stable_softmax(A_ , axis=-1)
if target_ids is not None:
lowerCAmelCase_ : str = tf.gather_nd(tf.squeeze(A_ , 0) , target_ids.reshape(-1 , 1))
lowerCAmelCase_ : Any = tf.expand_dims(A_ , 0)
lowerCAmelCase_ : List[Any] = tf.math.top_k(A_ , k=A_)
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase_ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase_ : Dict = outputs[0, masked_index, :]
lowerCAmelCase_ : Dict = logits.softmax(dim=-1)
if target_ids is not None:
lowerCAmelCase_ : str = probs[..., target_ids]
lowerCAmelCase_ , lowerCAmelCase_ : int = probs.topk(A_)
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Optional[int] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
lowerCAmelCase_ : int = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
lowerCAmelCase_ : Dict = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase_ : str = target_ids[p].tolist()
lowerCAmelCase_ : List[Any] = p
# Filter padding out:
lowerCAmelCase_ : Tuple = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase_ : Any = self.tokenizer.decode(A_ , skip_special_tokens=A_)
lowerCAmelCase_ : str = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
row.append(A_)
result.append(A_)
if single_mask:
return result[0]
return result
def UpperCAmelCase__ ( self : int , A_ : Any , A_ : List[Any]=None):
if isinstance(A_ , A_):
lowerCAmelCase_ : List[str] = [targets]
try:
lowerCAmelCase_ : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : Any = []
for target in targets:
lowerCAmelCase_ : List[str] = vocab.get(A_ , A_)
if id_ is None:
lowerCAmelCase_ : Optional[int] = self.tokenizer(
A_ , add_special_tokens=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , max_length=1 , truncation=A_ , )['''input_ids''']
if len(A_) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''')
continue
lowerCAmelCase_ : Union[str, Any] = input_ids[0]
            # XXX: if users hit this path, tokenization becomes pretty
            # slow, so the warning below lets them fix their input to
            # get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
target_ids.append(id_)
lowerCAmelCase_ : List[str] = list(set(A_))
if len(A_) == 0:
raise ValueError('''At least one target must be provided when passed.''')
lowerCAmelCase_ : Tuple = np.array(A_)
return target_ids
def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[int]=None , A_ : Tuple=None):
lowerCAmelCase_ : int = {}
if targets is not None:
lowerCAmelCase_ : Optional[Any] = self.get_target_ids(A_ , A_)
lowerCAmelCase_ : str = target_ids
if top_k is not None:
lowerCAmelCase_ : int = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
return {}, {}, postprocess_params
def __call__( self : str , A_ : Tuple , *A_ : Dict , **A_ : Optional[Any]):
lowerCAmelCase_ : Tuple = super().__call__(A_ , **A_)
if isinstance(A_ , A_) and len(A_) == 1:
return outputs[0]
return outputs
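# Illustrative usage (not part of the original module): driving the pipeline above through
# `transformers.pipeline`. The checkpoint name is only an example; any fill-mask model works.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
# Restrict scoring to explicit targets and keep the two best candidates.
for prediction in fill_mask("Paris is the [MASK] of France.", targets=["capital", "center"], top_k=2):
    print(prediction["token_str"], prediction["score"])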
| 171
| 0
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCamelCase = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , ):
_lowercase = bnb_quantization_config.load_in_abit
_lowercase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
_lowercase = []
# custom device map
if isinstance(snake_case_ , snake_case_ ) and len(device_map.keys() ) > 1:
_lowercase = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_lowercase = get_keys_to_not_convert(snake_case_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case_ )
_lowercase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_lowercase = []
_lowercase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case_ )
# compatibility with peft
_lowercase = load_in_abit
_lowercase = load_in_abit
_lowercase = get_parameter_device(snake_case_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
_lowercase = replace_with_bnb_layers(snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ )
# convert param to the right dtype
_lowercase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_lowercase = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
_lowercase = getattr(snake_case_ , snake_case_ , snake_case_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(snake_case_ ):
param.to(snake_case_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
_lowercase = replace_with_bnb_layers(
snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ )
_lowercase = get_quantized_model_device_map(
snake_case_ , snake_case_ , snake_case_ , max_memory=snake_case_ , no_split_module_classes=snake_case_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_lowercase = True
_lowercase = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
snake_case_ , snake_case_ , snake_case_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case_ , offload_state_dict=snake_case_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case_ , device_map=snake_case_ , offload_dir=snake_case_ )
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None ):
if device_map is None:
if torch.cuda.is_available():
_lowercase = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(snake_case_ , snake_case_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
_lowercase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_lowercase = {}
_lowercase = special_dtypes
_lowercase = no_split_module_classes
_lowercase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_lowercase = get_balanced_memory(
snake_case_ , low_zero=(device_map == """balanced_low_0""") , max_memory=snake_case_ , **snake_case_ , )
_lowercase = max_memory
_lowercase = infer_auto_device_map(snake_case_ , **snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
# check if don't have any quantized module on the cpu
_lowercase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_lowercase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
                    logger.info(
                        """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=None , snake_case_=None ):
if modules_to_not_convert is None:
_lowercase = []
_lowercase , _lowercase = _replace_with_bnb_layers(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , ):
_lowercase = False
for name, module in model.named_children():
if current_key_name is None:
_lowercase = []
current_key_name.append(snake_case_ )
if isinstance(snake_case_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_lowercase = """.""".join(snake_case_ )
_lowercase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_lowercase = False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
_lowercase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_lowercase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
_lowercase = module.weight.data
if module.bias is not None:
_lowercase = module.bias.data
bnb_module.requires_grad_(snake_case_ )
setattr(snake_case_ , snake_case_ , snake_case_ )
_lowercase = True
if len(list(module.children() ) ) > 0:
_lowercase , _lowercase = _replace_with_bnb_layers(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_lowercase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE ( snake_case_ ):
# Create a copy of the model
with init_empty_weights():
        _lowercase = deepcopy(snake_case_ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
_lowercase = find_tied_parameters(snake_case_ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case_ , snake_case_ ):
_lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowercase = sum(snake_case_ , [] )
_lowercase = len(snake_case_ ) > 0
# Check if it is a base model
_lowercase = False
if hasattr(snake_case_ , """base_model_prefix""" ):
_lowercase = not hasattr(snake_case_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowercase = list(model.named_children() )
_lowercase = [list_modules[-1][0]]
# add last module together with tied weights
_lowercase = set(snake_case_ ) - set(snake_case_ )
_lowercase = list(set(snake_case_ ) ) + list(snake_case_ )
# remove ".weight" from the keys
_lowercase = [""".weight""", """.bias"""]
_lowercase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowercase = name.replace(snake_case_ , """""" )
filtered_module_names.append(snake_case_ )
return filtered_module_names
def _SCREAMING_SNAKE_CASE ( snake_case_ ):
for m in model.modules():
if isinstance(snake_case_ , bnb.nn.Linearabit ):
return True
return False
def _SCREAMING_SNAKE_CASE ( snake_case_ ):
return next(parameter.parameters() ).device
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case_ , snake_case_ , 0 , dtype=snake_case_ , value=snake_case_ )
_lowercase = param_name
_lowercase = model
if "." in tensor_name:
_lowercase = tensor_name.split(""".""" )
for split in splits[:-1]:
_lowercase = getattr(snake_case_ , snake_case_ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
_lowercase = new_module
_lowercase = splits[-1]
# offload weights
_lowercase = False
offload_weight(module._parameters[tensor_name] , snake_case_ , snake_case_ , index=snake_case_ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , snake_case_ , index=snake_case_ , )
else:
offload_weight(snake_case_ , snake_case_ , snake_case_ , index=snake_case_ )
offload_weight(snake_case_ , param_name.replace("""weight""" , """SCB""" ) , snake_case_ , index=snake_case_ )
set_module_tensor_to_device(snake_case_ , snake_case_ , """meta""" , dtype=snake_case_ , value=torch.empty(*param.size() ) )
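# Illustrative sketch (not part of the original module): wiring these helpers together via
# accelerate's public entry point. The checkpoint path is a placeholder, and the exact
# BnbQuantizationConfig fields may vary across accelerate versions.
import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)  # weights stay on the meta device

bnb_config = BnbQuantizationConfig(load_in_8bit=True, torch_dtype=torch.float16)
model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="/path/to/checkpoint",  # placeholder folder holding the weights
    device_map="auto",
)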
| 572
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'owlvit_text_model'
    def __init__( self : Union[str, Any] , lowercase__ : Union[str, Any]=49_408 , lowercase__ : List[str]=512 , lowercase__ : Optional[Any]=2_048 , lowercase__ : List[str]=12 , lowercase__ : List[Any]=8 , lowercase__ : List[Any]=16 , lowercase__ : List[str]="quick_gelu" , lowercase__ : Tuple=1e-5 , lowercase__ : int=0.0 , lowercase__ : str=0.02 , lowercase__ : List[Any]=1.0 , lowercase__ : int=0 , lowercase__ : int=49_406 , lowercase__ : int=49_407 , **lowercase__ : Any , ) ->Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = intermediate_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = max_position_embeddings
_lowercase = hidden_act
_lowercase = layer_norm_eps
_lowercase = attention_dropout
_lowercase = initializer_range
_lowercase = initializer_factor
@classmethod
def _UpperCAmelCase ( cls : List[Any] , lowercase__ : Union[str, os.PathLike] , **lowercase__ : Tuple) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowercase__)
_lowercase , _lowercase = cls.get_config_dict(lowercase__ , **lowercase__)
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""") == "owlvit":
_lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(lowercase__ , **lowercase__)
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : str = 'owlvit_vision_model'
    def __init__( self : Optional[int] , lowercase__ : Dict=768 , lowercase__ : Tuple=3_072 , lowercase__ : List[str]=12 , lowercase__ : str=12 , lowercase__ : Any=3 , lowercase__ : Union[str, Any]=768 , lowercase__ : Union[str, Any]=32 , lowercase__ : Dict="quick_gelu" , lowercase__ : Tuple=1e-5 , lowercase__ : List[Any]=0.0 , lowercase__ : List[str]=0.02 , lowercase__ : List[Any]=1.0 , **lowercase__ : List[Any] , ) ->int:
"""simple docstring"""
super().__init__(**lowercase__)
_lowercase = hidden_size
_lowercase = intermediate_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = num_channels
_lowercase = image_size
_lowercase = patch_size
_lowercase = hidden_act
_lowercase = layer_norm_eps
_lowercase = attention_dropout
_lowercase = initializer_range
_lowercase = initializer_factor
@classmethod
def _UpperCAmelCase ( cls : Optional[int] , lowercase__ : Union[str, os.PathLike] , **lowercase__ : Optional[int]) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowercase__)
_lowercase , _lowercase = cls.get_config_dict(lowercase__ , **lowercase__)
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""") == "owlvit":
_lowercase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(lowercase__ , **lowercase__)
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'owlvit'
__SCREAMING_SNAKE_CASE : Tuple = True
    def __init__( self : str , lowercase__ : List[str]=None , lowercase__ : int=None , lowercase__ : str=512 , lowercase__ : Any=2.6592 , lowercase__ : List[str]=True , **lowercase__ : str , ) ->Tuple:
"""simple docstring"""
super().__init__(**lowercase__)
if text_config is None:
_lowercase = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""")
if vision_config is None:
_lowercase = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""")
_lowercase = OwlViTTextConfig(**lowercase__)
_lowercase = OwlViTVisionConfig(**lowercase__)
_lowercase = projection_dim
_lowercase = logit_scale_init_value
_lowercase = return_dict
_lowercase = 1.0
@classmethod
def _UpperCAmelCase ( cls : int , lowercase__ : Union[str, os.PathLike] , **lowercase__ : List[str]) ->"PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowercase__)
_lowercase , _lowercase = cls.get_config_dict(lowercase__ , **lowercase__)
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(lowercase__ , **lowercase__)
@classmethod
def _UpperCAmelCase ( cls : Optional[int] , lowercase__ : Dict , lowercase__ : Dict , **lowercase__ : str) ->Union[str, Any]:
"""simple docstring"""
_lowercase = {}
_lowercase = text_config
_lowercase = vision_config
return cls.from_dict(lowercase__ , **lowercase__)
def _UpperCAmelCase ( self : Tuple) ->Tuple:
"""simple docstring"""
_lowercase = copy.deepcopy(self.__dict__)
_lowercase = self.text_config.to_dict()
_lowercase = self.vision_config.to_dict()
_lowercase = self.__class__.model_type
return output
class __a ( _snake_case ):
@property
def _UpperCAmelCase ( self : List[str]) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
])
@property
def _UpperCAmelCase ( self : Union[str, Any]) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
])
@property
def _UpperCAmelCase ( self : str) ->float:
"""simple docstring"""
return 1e-4
def _UpperCAmelCase ( self : str , lowercase__ : "ProcessorMixin" , lowercase__ : int = -1 , lowercase__ : int = -1 , lowercase__ : Optional["TensorType"] = None , ) ->Mapping[str, Any]:
"""simple docstring"""
_lowercase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=lowercase__ , seq_length=lowercase__ , framework=lowercase__)
_lowercase = super().generate_dummy_inputs(
processor.image_processor , batch_size=lowercase__ , framework=lowercase__)
return {**text_input_dict, **image_input_dict}
@property
def _UpperCAmelCase ( self : Any) ->int:
"""simple docstring"""
return 14
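# Illustrative usage (not part of the original module): composing the full OWL-ViT config
# from its two sub-configs. The class names above are mangled; upstream they are
# OwlViTTextConfig, OwlViTVisionConfig and OwlViTConfig, so the upstream imports are used here.
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_config = OwlViTTextConfig(vocab_size=49_408, hidden_size=512, num_attention_heads=8)
vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)
config = OwlViTConfig.from_text_vision_configs(
    text_config.to_dict(), vision_config.to_dict(), projection_dim=512
)
print(config.text_config.hidden_size, config.vision_config.patch_size)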
| 572
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A__ : Optional[int] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase__ ( _UpperCAmelCase ):
_UpperCAmelCase :int = ["pixel_values"]
def __init__( self : Union[str, Any] , snake_case__ : List[str] = True , snake_case__ : int = None , snake_case__ : Union[str, Any] = PILImageResampling.BICUBIC , snake_case__ : List[Any] = True , snake_case__ : Union[str, Any] = None , snake_case__ : str = True , snake_case__ : Tuple = 1 / 255 , snake_case__ : str = True , snake_case__ : Dict = None , snake_case__ : List[Any] = None , snake_case__ : int = True , **snake_case__ : Any , ):
super().__init__(**_lowerCAmelCase )
lowerCamelCase_ : Union[str, Any] =size if size is not None else {"shortest_edge": 224}
lowerCamelCase_ : Tuple =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
lowerCamelCase_ : str =crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase_ : List[Any] =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase , param_name="crop_size" )
lowerCamelCase_ : int =do_resize
lowerCamelCase_ : List[Any] =size
lowerCamelCase_ : Tuple =resample
lowerCamelCase_ : Dict =do_center_crop
lowerCamelCase_ : Tuple =crop_size
lowerCamelCase_ : Optional[int] =do_rescale
lowerCamelCase_ : Optional[int] =rescale_factor
lowerCamelCase_ : Tuple =do_normalize
lowerCamelCase_ : Dict =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ : List[str] =image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ : Union[str, Any] =do_convert_rgb
def UpperCAmelCase__ ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple = PILImageResampling.BICUBIC , snake_case__ : Optional[Any] = None , **snake_case__ : Optional[int] , ):
lowerCamelCase_ : Optional[Any] =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCamelCase_ : str =get_resize_output_image_size(_lowerCAmelCase , size=size["shortest_edge"] , default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] = None , **snake_case__ : Tuple , ):
lowerCamelCase_ : Tuple =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def UpperCAmelCase__ ( self : int , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Union[str, Any] = None , **snake_case__ : Optional[int] , ):
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def UpperCAmelCase__ ( self : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : int = None , **snake_case__ : Any , ):
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] = None , snake_case__ : int = None , snake_case__ : Dict = None , snake_case__ : Union[str, Any] = None , snake_case__ : Optional[Any] = None , snake_case__ : Any = None , snake_case__ : Optional[Any] = None , snake_case__ : List[str] = None , snake_case__ : Optional[Any] = None , snake_case__ : Union[str, Any] = None , snake_case__ : List[Any] = None , snake_case__ : Dict = None , snake_case__ : Optional[int] = ChannelDimension.FIRST , **snake_case__ : Tuple , ):
lowerCamelCase_ : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ : Dict =size if size is not None else self.size
lowerCamelCase_ : Optional[int] =get_size_dict(_lowerCAmelCase , param_name="size" , default_to_square=_lowerCAmelCase )
lowerCamelCase_ : str =resample if resample is not None else self.resample
lowerCamelCase_ : Optional[Any] =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ : Any =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ : Union[str, Any] =get_size_dict(_lowerCAmelCase , param_name="crop_size" , default_to_square=_lowerCAmelCase )
lowerCamelCase_ : Optional[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ : Optional[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ : Tuple =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ : str =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ : List[str] =image_std if image_std is not None else self.image_std
lowerCamelCase_ : Optional[int] =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ : Tuple =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ : Optional[Any] =[convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ : Tuple =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ : List[Any] =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ : Dict =[self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ : Optional[int] =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ : Dict =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
lowerCamelCase_ : List[str] =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
lowerCamelCase_ : Dict ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
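# Illustrative usage (not part of the original module): the preprocessing flow above mirrors
# CLIP-style image processors, so the upstream CLIPImageProcessor is used here to show the
# equivalent call; the dummy image and the sizes are arbitrary.
import numpy as np
from transformers import CLIPImageProcessor

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC dummy image
processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized, cropped, rescaled, normalized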
| 153
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 588
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] = logging.get_logger(__name__)
_A : Optional[int] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class a__ ( a_ ):
__lowerCAmelCase = """markuplm"""
    def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=0 , _a=0 , _a=2 , _a=256 , _a=1_024 , _a=216 , _a=1_001 , _a=32 , _a=50 , _a="absolute" , _a=True , _a=None , **_a , ):
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
lowercase : Dict = vocab_size
lowercase : int = hidden_size
lowercase : Tuple = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Union[str, Any] = hidden_act
lowercase : Tuple = intermediate_size
lowercase : Dict = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : Optional[int] = max_position_embeddings
lowercase : Optional[Any] = type_vocab_size
lowercase : Optional[Any] = initializer_range
lowercase : Optional[int] = layer_norm_eps
lowercase : int = position_embedding_type
lowercase : Optional[Any] = use_cache
lowercase : int = classifier_dropout
# additional properties
lowercase : Tuple = max_depth
lowercase : str = max_xpath_tag_unit_embeddings
lowercase : List[Any] = max_xpath_subs_unit_embeddings
lowercase : str = tag_pad_id
lowercase : Optional[Any] = subs_pad_id
lowercase : str = xpath_unit_hidden_size
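# Illustrative usage (not part of the original module): the xpath-related fields are what
# distinguish this config from a plain BERT-style one. The class name above is mangled;
# upstream it is MarkupLMConfig, which is used here.
from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(config.max_xpath_tag_unit_embeddings, config.tag_pad_id)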
| 518
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
lowercase : Dict = args.output + ".pt"
lowercase : Tuple = OrderedDict()
with tf.device("/CPU:0" ):
lowercase : Union[str, Any] = tf.train.load_checkpoint(args.tf_model_dir )
lowercase : Optional[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Optional[int] = reader.get_tensor(__snake_case ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
lowercase : Dict = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
lowercase : str = 8
lowercase : Tuple = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Tuple = torch.tensor(__snake_case )
elif key_name.startswith("model/moe" ):
lowercase : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
lowercase : Optional[int] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
lowercase : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = torch.tensor(__snake_case )
elif key_name.endswith("/softmlp/kernel" ):
lowercase : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
lowercase : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Union[str, Any] = torch.tensor(__snake_case )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
lowercase : Dict = key_name[-9:-7]
for i in range(16 ):
lowercase : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
lowercase : Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : Any = torch.tensor(__snake_case )
elif key_name.startswith("model/mlp" ):
lowercase : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
lowercase : Any = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
lowercase : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] = torch.tensor(__snake_case )
elif key_name.endswith("/p1/bias" ):
lowercase : Any = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
lowercase : Tuple = vnp.copy() # same because it is one dimensional
lowercase : List[Any] = torch.tensor(__snake_case )
elif key_name.endswith("/p2/kernel" ):
lowercase : Any = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
lowercase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] = torch.tensor(__snake_case )
elif key_name.endswith("/p2/bias" ):
lowercase : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
lowercase : Dict = vnp.copy() # same because it is one dimensional
lowercase : Tuple = torch.tensor(__snake_case )
elif key_name.startswith("model/ln" ):
lowercase : Dict = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowercase : int = "model.blocks.%d.feed_forward.norm.bias" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : Any = torch.tensor(__snake_case )
elif key_name.endswith("/g" ):
lowercase : Optional[int] = "model.blocks.%d.feed_forward.norm.weight" % player
lowercase : Optional[int] = vnp.copy() # same because it is one dimensional
lowercase : Tuple = torch.tensor(__snake_case )
elif key_name.startswith("model/att" ):
lowercase : Optional[int] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
lowercase : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Optional[Any] = state[:, 0, :, :]
lowercase : Optional[Any] = state[:, 1, :, :]
lowercase : Tuple = state[:, 2, :, :]
lowercase : Optional[int] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
lowercase : List[str] = torch.tensor(__snake_case )
lowercase : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
lowercase : Tuple = torch.tensor(__snake_case )
lowercase : Tuple = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
lowercase : List[str] = torch.tensor(__snake_case )
elif key_name.endswith("/o/kernel" ):
lowercase : Dict = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
lowercase : int = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict = torch.tensor(__snake_case )
elif key_name.startswith("model/an" ):
lowercase : int = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
lowercase : List[str] = "model.blocks.%d.self_attn.norm.bias" % player
lowercase : Optional[int] = vnp.copy() # same because it is one dimensional
lowercase : str = torch.tensor(__snake_case )
elif key_name.endswith("/g" ):
lowercase : Optional[int] = "model.blocks.%d.self_attn.norm.weight" % player
lowercase : str = vnp.copy() # same because it is one dimensional
lowercase : Dict = torch.tensor(__snake_case )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
lowercase : List[Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
lowercase : Optional[int] = "model.%s.weight" % nlayer
lowercase : Optional[Any] = vnp.copy() # same in embedded
lowercase : Optional[Any] = torch.tensor(__snake_case )
if key_name.startswith("model/wte" ):
lowercase : Optional[int] = "lm_head.weight"
lowercase : List[Any] = vnp.copy() # same in embedded
lowercase : Any = torch.tensor(__snake_case )
elif key_name.startswith("model/wob" ):
lowercase : List[Any] = "final_logits_bias"
lowercase : Tuple = vnp.copy() # same in embedded
lowercase : Tuple = state.reshape((1, -1) )
lowercase : Any = torch.tensor(__snake_case )
elif key_name == "model/dense/kernel":
lowercase : Dict = "model.last_project.weight"
lowercase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : str = torch.tensor(__snake_case )
elif key_name == "model/dense_1/bias":
lowercase : Tuple = "model.last_project.bias"
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__snake_case )
torch.save(__snake_case , args.output )
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
_A : List[Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
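# Illustrative invocation (not part of the original script); the script filename and paths
# are placeholders:
#
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir /path/to/tf_checkpoint \
#       --output /path/to/gptsan_pytorch.pt
#
# The script reads parameters.json plus the TF checkpoint from --tf_model_dir and writes a
# single PyTorch state dict to --output.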
| 518
| 1
|
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    """A trie (prefix tree) supporting insertion and prefix lookup for autocomplete."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 506
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( a ):
'''simple docstring'''
    def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Tuple:
_UpperCAmelCase : int = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Union[str, Any] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : str = use_input_mask
_UpperCAmelCase : Tuple = use_token_type_ids
_UpperCAmelCase : Tuple = use_labels
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = intermediate_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : Optional[Any] = type_vocab_size
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : int = num_choices
_UpperCAmelCase : Any = scope
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_input_mask:
_UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : str = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Dict:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
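# The suite above follows the standard transformers test layout: unit checks
# driven by DistilBertModelTester plus a @slow integration test. A hedged way
# to run it locally (the repository-relative path is an assumption):
#
#     pytest tests/models/distilbert/test_modeling_distilbert.py -k "not slow"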
| 506
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)

        # invert the attention mask to the extended (additive) form
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
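# Hedged usage sketch for the encoder above; the hyperparameter values below
# are illustrative assumptions, not taken from the original source:
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#         num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#         feed_forward_proj="gated-gelu",
#     )
#     tokens = torch.randint(0, 1536, (1, 2048))
#     mask = torch.ones(1, 2048, dtype=torch.bool)
#     hidden, mask = encoder(tokens, mask)   # hidden: (1, 2048, 768)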
| 704
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
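# Quick illustration of the special-token layouts the three methods above
# produce (token ids 5, 6, 7 are placeholders):
#
#     tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     tok.build_inputs_with_special_tokens([5, 6])           # [cls, 5, 6, sep]
#     tok.build_inputs_with_special_tokens([5, 6], [7])      # [cls, 5, 6, sep, 7, sep]
#     tok.create_token_type_ids_from_sequences([5, 6], [7])  # [0, 0, 0, 0, 1, 1]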
| 57
| 0
|
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Return the k-th (0-indexed) lexicographic permutation of range(n)
    using the factorial number system.

    >>> kth_permutation(0, 3)
    [0, 1, 2]
    >>> kth_permutation(5, 3)
    [2, 1, 0]
    """
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: each factorial-base digit of k selects the next element.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowerCamelCase = logging.get_logger(__name__)
# General docstring
__lowerCamelCase = 'RegNetConfig'
# Base docstring
__lowerCamelCase = 'facebook/regnet-y-040'
__lowerCamelCase = [1, 10_88, 7, 7]
# Image classification docstring
__lowerCamelCase = 'facebook/regnet-y-040'
__lowerCamelCase = 'tabby, tabby cat'
__lowerCamelCase = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self : List[str] , __snake_case : int , __snake_case : int = 3 , __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : Optional[str] = "relu" , **__snake_case : str , ) -> Any:
super().__init__(**__snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__magic_name__: Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__magic_name__: Dict = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=__snake_case , strides=__snake_case , padding="""VALID""" , groups=__snake_case , use_bias=__snake_case , name="""convolution""" , )
__magic_name__: int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
__magic_name__: Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCamelCase__ ( self : Optional[int] , __snake_case : str ) -> Dict:
__magic_name__: Optional[Any] = self.convolution(self.padding(__snake_case ) )
__magic_name__: Union[str, Any] = self.normalization(__snake_case )
__magic_name__: Tuple = self.activation(__snake_case )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , **__snake_case : Dict ) -> Optional[int]:
super().__init__(**__snake_case )
__magic_name__: Tuple = config.num_channels
__magic_name__: Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCamelCase__ ( self : List[str] , __snake_case : Dict ) -> int:
__magic_name__: Union[str, Any] = shape_list(__snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__magic_name__: Any = tf.transpose(__snake_case , perm=(0, 2, 3, 1) )
__magic_name__: Dict = self.embedder(__snake_case )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int = 2 , **__snake_case : Any ) -> Dict:
super().__init__(**__snake_case )
__magic_name__: Union[str, Any] = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=1 , strides=__snake_case , use_bias=__snake_case , name="""convolution""" )
__magic_name__: Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : tf.Tensor , __snake_case : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(__snake_case ) , training=__snake_case )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self : int , __snake_case : int , __snake_case : int , **__snake_case : str ) -> str:
super().__init__(**__snake_case )
__magic_name__: Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name="""pooler""" )
__magic_name__: Optional[Any] = [
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCamelCase__ ( self : Dict , __snake_case : List[str] ) -> List[Any]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__magic_name__: List[str] = self.pooler(__snake_case )
for layer_module in self.attention:
__magic_name__: List[str] = layer_module(__snake_case )
__magic_name__: Optional[Any] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 , **__snake_case : Optional[int] ) -> Optional[int]:
super().__init__(**__snake_case )
__magic_name__: List[str] = in_channels != out_channels or stride != 1
__magic_name__: Union[str, Any] = max(1 , out_channels // config.groups_width )
__magic_name__: Optional[Any] = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__magic_name__: List[str] = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name="""layer.2""" ),
]
__magic_name__: Any = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Any ) -> Union[str, Any]:
__magic_name__: Any = hidden_state
for layer_module in self.layers:
__magic_name__: Optional[int] = layer_module(__snake_case )
__magic_name__: str = self.shortcut(__snake_case )
hidden_state += residual
__magic_name__: int = self.activation(__snake_case )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self : List[str] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 , **__snake_case : Union[str, Any] ) -> Dict:
super().__init__(**__snake_case )
__magic_name__: str = in_channels != out_channels or stride != 1
__magic_name__: Dict = max(1 , out_channels // config.groups_width )
__magic_name__: Tuple = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
__magic_name__: str = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name="""layer.3""" ),
]
__magic_name__: Optional[int] = ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : List[str] , __snake_case : int ) -> Dict:
__magic_name__: int = hidden_state
for layer_module in self.layers:
__magic_name__: Optional[Any] = layer_module(__snake_case )
__magic_name__: Union[str, Any] = self.shortcut(__snake_case )
hidden_state += residual
__magic_name__: Any = self.activation(__snake_case )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self : int , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 2 , __snake_case : int = 2 , **__snake_case : List[Any] ) -> Optional[int]:
super().__init__(**__snake_case )
__magic_name__: int = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
__magic_name__: Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(__snake_case , __snake_case , __snake_case , stride=__snake_case , name="""layers.0""" ),
*[layer(__snake_case , __snake_case , __snake_case , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowerCamelCase__ ( self : int , __snake_case : Union[str, Any] ) -> Tuple:
for layer_module in self.layers:
__magic_name__: Dict = layer_module(__snake_case )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self : Union[str, Any] , __snake_case : RegNetConfig , **__snake_case : Optional[Any] ) -> Dict:
super().__init__(**__snake_case )
__magic_name__: List[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
__magic_name__: Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case , name=F'stages.{i+1}' ) )
def lowerCamelCase__ ( self : int , __snake_case : tf.Tensor , __snake_case : bool = False , __snake_case : bool = True ) -> TFBaseModelOutputWithNoAttention:
__magic_name__: int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__magic_name__: Optional[Any] = hidden_states + (hidden_state,)
__magic_name__: Optional[Any] = stage_module(__snake_case )
if output_hidden_states:
__magic_name__: int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self : Optional[int] , __snake_case : Any , **__snake_case : List[str] ) -> int:
super().__init__(**__snake_case )
__magic_name__: Union[str, Any] = config
__magic_name__: Optional[int] = TFRegNetEmbeddings(__snake_case , name="""embedder""" )
__magic_name__: int = TFRegNetEncoder(__snake_case , name="""encoder""" )
__magic_name__: int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name="""pooler""" )
@unpack_inputs
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : tf.Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__magic_name__: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__: int = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__: List[str] = self.embedder(__snake_case , training=__snake_case )
__magic_name__: Optional[Any] = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
__magic_name__: str = encoder_outputs[0]
__magic_name__: List[Any] = self.pooler(__snake_case )
# Change to NCHW output format have uniformity in the modules
__magic_name__: int = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
__magic_name__: List[str] = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__magic_name__: List[str] = tuple([tf.transpose(__snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__lowerCamelCase = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCamelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,SCREAMING_SNAKE_CASE_ ,)
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self : Optional[Any] , __snake_case : RegNetConfig , *__snake_case : List[Any] , **__snake_case : Tuple ) -> Tuple:
super().__init__(__snake_case , *__snake_case , **__snake_case )
__magic_name__: List[str] = TFRegNetMainLayer(__snake_case , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase__ ( self : Dict , __snake_case : tf.Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : int=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__magic_name__: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__: List[str] = self.regnet(
pixel_values=__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,SCREAMING_SNAKE_CASE_ ,)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self : int , __snake_case : RegNetConfig , *__snake_case : Any , **__snake_case : Any ) -> Optional[Any]:
super().__init__(__snake_case , *__snake_case , **__snake_case )
__magic_name__: Union[str, Any] = config.num_labels
__magic_name__: Tuple = TFRegNetMainLayer(__snake_case , name="""regnet""" )
# classification head
__magic_name__: List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase__ ( self : List[str] , __snake_case : tf.Tensor = None , __snake_case : tf.Tensor = None , __snake_case : bool = None , __snake_case : bool = None , __snake_case : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__magic_name__: Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__: Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__: Any = self.regnet(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
__magic_name__: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
__magic_name__: Optional[int] = self.classifier[0](__snake_case )
__magic_name__: List[Any] = self.classifier[1](__snake_case )
__magic_name__: Optional[int] = None if labels is None else self.hf_compute_loss(labels=__snake_case , logits=__snake_case )
if not return_dict:
__magic_name__: List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
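# Hedged end-to-end sketch (the checkpoint name comes from the docstring
# constants above; `image` is any PIL image and preprocessing details are assumed):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits   # shape (batch_size, num_labels)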
| 96
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on a distributed setup that the context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
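# Minimal illustration of the `accumulate` pattern these tests exercise
# (a sketch, assuming an already-built model, optimizer, and dataloader;
# `compute_loss` is a hypothetical helper):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, opt, dl = accelerator.prepare(model, opt, dl)
#     for batch in dl:
#         with accelerator.accumulate(model):   # grads sync only on boundary steps
#             loss = compute_loss(model, batch)
#             accelerator.backward(loss)
#             opt.step()
#             opt.zero_grad()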
| 718
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__a = '\\n\n'
__a = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__a = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) ,reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] ,)
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
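# The core computation above, reduced to a single unpadded sentence (a sketch;
# the shift logic mirrors the loop body):
#
#     logits = model(ids).logits                            # (1, T, V)
#     shift_logits, shift_labels = logits[:, :-1], ids[:, 1:]
#     nll = CrossEntropyLoss()(shift_logits.transpose(1, 2), shift_labels)
#     ppl = torch.exp(nll)                                  # perplexity of the sentence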
| 300
| 0
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a__ : Optional[Any] = True
from torch.cuda.amp import autocast
a__ : List[str] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """
    Data collator that dynamically pads the received inputs and prepares the
    masked time indices for self-supervised pretraining.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
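# `_compute_mask_indices` produces a boolean (batch_size, sequence_length)
# array marking the spans to mask for the contrastive pretraining objective;
# the attention mask passed above keeps padded positions from being selected,
# and `min_masks=2` guarantees at least two masked spans per example.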
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
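# The gumbel temperature schedule applied above decays geometrically with the
# number of update steps and is clamped at `min_gumbel_temp`:
#
#     temp(step) = max(max_gumbel_temp * gumbel_temp_decay ** step, min_gumbel_temp)
#
# e.g. with max=2.0, decay=0.999995, min=0.5 the temperature reaches the 0.5
# floor after roughly ln(0.25) / ln(0.999995) ≈ 277,000 update steps.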
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir)

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing)

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``")

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets['train'], eval_dataset=vectorized_datasets['validation'], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay)
    trainer.train()
if __name__ == "__main__":
main()
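# Example invocation (illustrative; the script name, model and dataset are
# placeholders, but the flags follow the dataclass fields referenced above):
#   python run_pretrain.py \
#       --model_name_or_path patrickvonplaten/wav2vec2-base-v2 \
#       --dataset_name librispeech_asr --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained --do_train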
a__ : Tuple = "Tobias Carryer"
from time import time
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : str=int(time())) -> List[Any]: # noqa: B008
"""simple docstring"""
lowercase__ = multiplier
lowercase__ = increment
lowercase__ = modulo
lowercase__ = seed
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
lowercase__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
a__ : str = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
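# Note (illustrative, not part of the original file): the constants above
# (a=1664525, c=1013904223, m=2<<31=2**32) are the classic "Numerical Recipes"
# parameters. They satisfy the Hull-Dobell theorem (c is odd, a - 1 is
# divisible by 4, m is a power of two), so the generator has full period m.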
"""simple docstring"""
import unittest
from transformers import DonutProcessor
lowercase__ = 'naver-clova-ix/donut-base'
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Union[str, Any] = DonutProcessor.from_pretrained(lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Any = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a__: Optional[Any] = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a__: List[Any] = self.processor.tokenajson(lowercase)
self.assertDictEqual(lowercase , lowercase)
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id_): id_ for id_ in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Perform the SP char mapping on every character of the text."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """Whether `char` is a CJK unified ideograph."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
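# Minimal usage sketch (illustrative; the file paths are placeholders):
#   tokenizer = ErnieMTokenizer(
#       sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt")
#   input_ids = tokenizer("Hello world")["input_ids"]  # [CLS] + tokens + [SEP]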
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
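# Illustrative predictor-corrector loop (a sketch, not part of this file;
# `model` is assumed to be a score-predicting UNet and `sample` a noise tensor):
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, sigma_t).sample, sample).prev_sample
#       sample = scheduler.step_pred(model(sample, sigma_t).sample, t, sample).prev_sample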
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative
    product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
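# Illustrative check (not part of this file): with the "cosine" transform,
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, so for
# num_diffusion_timesteps = 2 the returned betas are
#   min(1 - alpha_bar(0.5) / alpha_bar(0.0), 0.999) and
#   min(1 - alpha_bar(1.0) / alpha_bar(0.5), 0.999).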
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps=1000, variance_type="fixed_small_log", clip_sample=True, clip_sample_range=1.0, prediction_type="epsilon", beta_schedule="squaredcos_cap_v2"):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)

            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep)

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler.")

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
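# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt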
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a trainable low-rank adapter. Only used for testing purposes."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
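    # Minimal usage sketch (illustrative, not part of the original tests): the
    # wrapper sums the frozen base layer's output with a trainable low-rank path.
    #   layer = nn.Linear(16, 16)
    #   wrapped = LoRALayer(layer, rank=4)
    #   y = wrapped(torch.randn(2, 16))  # base output + adapter output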
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto")

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4")
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting the whole model
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting the whole model
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto")
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced")

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
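# Example invocation (illustrative; `run_glue.py` is a placeholder training
# script that must expose an `_mp_fn(index)` entry point for xmp.spawn):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...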
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
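# Illustrative example (not part of the original file): with pad_token_id=0 and
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
# trim_batch drops the columns that contain only padding and returns
#   [[5, 6], [7, 0]].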
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ) -> Optional[Any]:
return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
@staticmethod
    def get_char_lens( data_file ) -> Optional[int]:
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) )
def save_git_info( folder_path ):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    '''simple docstring'''
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info( ):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( fn : Callable , x : Iterable ):
    '''simple docstring'''
    return list(map(fn , x ) )
def pickle_save( obj , path ):
    '''simple docstring'''
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( text ):
    '''simple docstring'''
    def remove_articles(text ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def fa_score( prediction , ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
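# Worked example: fa_score("the cat sat", "cat sat down") normalizes away the
# article, leaving 2 shared tokens out of 2 and 3 respectively, so
# precision = 1.0, recall = 2/3, and the returned F1 is 0.8.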
def exact_match_score( prediction , ground_truth ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith("""rag""" )
def set_extra_model_params( extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
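# Example (illustrative): with extra_params = ("dropout",) and a T5-style
# config that only defines `dropout_rate`, the hparams value is copied onto
# config.dropout_rate and removed from hparams:
#   hparams, config = set_extra_model_params(("dropout",), hparams, config)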
| 393
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
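    # Note: `classifier.predict` returns a sigmoid probability, so in practice a
    # threshold is more robust than the exact equality checks above, e.g.:
    #   prediction = "Abnormality detected" if result[0][0] > 0.5 else "Normal"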
| 393
| 1
|
"""simple docstring"""
def solution( n : int = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
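    # Worked check (Project Euler problem 6): for n = 10 the sum of the squares
    # is 385 and the square of the sum is 3025, so solution(10) returns 2640.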
| 373
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value ) -> None:
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    def __init__( self , tree ) -> None:
        self.tree = tree
    def depth_first_search( self , node ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
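    # Example (illustrative):
    #   tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
    #   list(BinaryTreeNodeSum(tree)) == [12]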
| 373
| 1
|
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class UpperCamelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = """bertabs"""
    def __init__( self , vocab_size=3_0522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 441
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase ( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''decord''' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos , **kwargs ):
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
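# Example usage (illustrative; the checkpoint name is an assumption, any
# video-classification model from the Hub would do):
#   from transformers import pipeline
#   video_cls = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   video_cls("https://example.com/archery.mp4", top_k=3)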
| 441
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0Xe_000
SEP = 0Xe_001
BOS = 0Xe_002
MASK = 0Xe_003
RESERVED = 0Xe_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Dict , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2_0_4_8 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size( self : List[Any] ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
    def _tokenize( self : str , text : str ) -> List[str]:
        """simple docstring"""
        return list(text )
    def _convert_token_to_id( self : Dict , token : str ) -> int:
        """simple docstring"""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )
    def _convert_id_to_token( self : str , index : int ) -> str:
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )
    def convert_tokens_to_string( self : Any , tokens ) -> Optional[int]:
        """simple docstring"""
        return "".join(tokens )
    def build_inputs_with_special_tokens( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self : List[str] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ) -> str:
        """simple docstring"""
        return ()
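# Example (illustrative): this tokenizer maps characters straight to Unicode
# codepoints, so encoding "hi" yields [CLS, ord("h"), ord("i"), SEP], i.e.
# [0xE000, 104, 105, 0xE001].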
| 649
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : Any , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self : Tuple ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class _lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self : Any ) -> str:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 649
| 1
|
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __SCREAMING_SNAKE_CASE ( PreTrainedModel , BackboneMixin ):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self : Union[str, Any] , config : List[str] , **kwargs : List[Any] ):
        '''simple docstring'''
        requires_backends(self , """timm""" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
        if config.backbone not in timm.list_models():
            raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
        if hasattr(config , """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        pretrained = getattr(config , """use_pretrained_backbone""" , None )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , """out_indices""" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
@classmethod
    def from_pretrained( cls : Any , pretrained_model_name_or_path : Union[str, Any] , *model_args : Tuple , **kwargs : Optional[int] ):
        '''simple docstring'''
        requires_backends(cls , ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("""config""" , TimmBackboneConfig() )
        use_timm = kwargs.pop("""use_timm_backbone""" , True )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )
        num_channels = kwargs.pop("""num_channels""" , config.num_channels )
        features_only = kwargs.pop("""features_only""" , config.features_only )
        use_pretrained_backbone = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("""out_indices""" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self : List[Any] , module : Union[str, Any] ):
        '''simple docstring'''
        pass
    def forward( self : List[Any] , pixel_values : Optional[Any] , output_attentions : Dict=None , output_hidden_states : str=None , return_dict : Union[str, Any]=None , **kwargs : str ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
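# Example usage (illustrative; "resnet50" is just an assumed timm model name,
# and the class above carries an obfuscated name in this dump):
#   config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False, out_indices=(2, 3, 4))
#   backbone = __SCREAMING_SNAKE_CASE(config)
#   feature_maps = backbone(pixel_values).feature_maps   # given a pixel_values tensor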
| 498
|
"""simple docstring"""
from __future__ import annotations
solution = []
def is_safe( board: list[list[int]], row: int, column: int ) ->bool:
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1 ), range(column, -1, -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1 ), range(column, len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board: list[list[int]], row: int ) ->bool:
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board, row, i ):
            board[row][i] = 1
            solve(board, row + 1 )
            board[row][i] = 0
    return False
def printboard( board: list[list[int]] ) ->None:
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("""Q""", end=""" """ )
            else:
                print(""".""", end=""" """ )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
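# For n = 8 the backtracking above prints every placement it finds; the
# classic result is 92 distinct solutions.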
| 498
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self : str ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self : Any , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self : List[Any] ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self : Optional[Any] ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
        self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
        self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
        self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
        self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
        self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )
    def test_feat_extract_from_and_save_pretrained( self : Union[str, Any] ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self : Union[str, Any] ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self : Union[str, Any] ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="np" , sampling_rate=4_4100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self : Optional[Any] , num_samples ):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self : List[Any] ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_values = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_values , atol=1E-4 ) )
| 62
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
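# With the lazy structure above, importing this package is cheap: a statement
# like `from transformers.onnx import OnnxConfig` only triggers the real
# import of the `.config` submodule at attribute-access time, via
# _LazyModule.__getattr__.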
| 599
| 0
|
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def __snake_case ( _UpperCAmelCase : str = "https://www.worldometers.info/coronavirus/"):
    xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(_UpperCAmelCase).content).xpath(xpath_str))
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 702
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class lowercase__ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''encodec'''
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4_0_0_0 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=1_2_8 , num_filters=3_2 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1_0_2_4 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
        super().__init__(**kwargs )
@property
    def chunk_length( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate( self ):
'''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers( self ):
'''simple docstring'''
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
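# Worked example: with the default upsampling_ratios [8, 5, 4, 2] the hop
# length is 8 * 5 * 4 * 2 = 320 samples, so at 24 kHz the frame_rate property
# returns ceil(24000 / 320) = 75 frames per second.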
| 350
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 483
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple =logging.get_logger(__name__)
A_ : int ={
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowercase_ ( PretrainedConfig):
"""simple docstring"""
    model_type = '''efficientformer'''
    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [48, 96, 224, 448] , downsamples = [True, True, True, True] , dim = 448 , key_dim = 32 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 16 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_metaad_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1e-5 , hidden_act = "gelu" , initializer_range = 0.0_2 , layer_norm_eps = 1e-1_2 , image_size = 224 , batch_norm_eps = 1e-0_5 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        a_ = hidden_act
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 483
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets( screen_name ) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F'getting tweets before {oldest}' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F'...{len(alltweets )} tweets downloaded so far' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F'new_{screen_name}_tweets.csv' , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
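# Note: the standard user_timeline endpoint only serves roughly the most
# recent 3200 tweets of an account, so the pagination above stops there
# regardless of how many tweets the account has.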
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 284
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path ) -> Any:
    '''simple docstring'''
    with open(path , '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments :
'''simple docstring'''
    dataset_name : Optional[str] =field(
        default=None , metadata={
            """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
        } , )
    dataset_config_name : Optional[str] =field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    train_dir : Optional[str] =field(default=None , metadata={"""help""": """A folder containing the training data."""} )
    validation_dir : Optional[str] =field(default=None , metadata={"""help""": """A folder containing the validation data."""} )
    train_val_split : Optional[float] =field(
        default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
    max_train_samples : Optional[int] =field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples : Optional[int] =field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class ModelArguments :
'''simple docstring'''
    model_name_or_path : str =field(
        default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
    model_type : Optional[str] =field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} , )
    config_name : Optional[str] =field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    cache_dir : Optional[str] =field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    model_revision : str =field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    image_processor_name : Optional[str] =field(default=None , metadata={"""help""": """Name or path of preprocessor config."""} )
    use_auth_token : bool =field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    ignore_mismatched_sizes : bool =field(
        default=False , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn(examples):
    # Stack the per-example image tensors into one batch tensor.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing --help to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the evaluate package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
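    # Note: `set_transform` (used below) applies these functions lazily at access time, so
    # images are decoded and augmented per batch instead of being preprocessed up front.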
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
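    # Passing the image processor as `tokenizer` is deliberate: the Trainer only uses it to
    # save the preprocessing config next to the model checkpoints.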
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 257
|
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
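        # Each (source, destination) pair maps a DeiT-style parameter name from the original
        # checkpoint onto the corresponding HF ViT encoder parameter.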
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        # the fused qkv matrix has shape (3 * hidden_size, hidden_size): rows [0:h] are the
        # queries, [h:2h] the keys and [2h:3h] the values
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 497
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
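# Checks that the "Models" section of the doc table of content is free of duplicate entries
# and alphabetically sorted; `--fix_and_overwrite` rewrites the YAML in place.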
_lowerCAmelCase = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Removes duplicate entries from the model doc TOC and sorts models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 706
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 318
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 42
|
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # accumulating a gradient list of a different length must fail
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
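    # The test below exercises the same accumulator under tf.distribute.MirroredStrategy,
    # splitting one CPU into two logical devices so that each replica holds its own shard.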
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 588
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
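# Feature extractor for Whisper-style speech models: pads or truncates raw audio to fixed
# 30-second chunks and converts it to 80-bin log-mel spectrogram features.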
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a Whisper feature extractor."""

    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30,
                 n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        # clamp to at most 8 dB below the maximum, then map into roughly [-1, 1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value: float = 0.0):
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True, pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length",
        max_length: Optional[int] = None, sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None, **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw audio into padded log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (480000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a dict, dropping the (large, derivable) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 297
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
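# Wraps a FLAVA image processor and a BERT tokenizer behind a single `__call__` that accepts
# text, images, or both and merges the resulting encodings.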
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self, images: Optional[ImageInput] = None, text=None, add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 297
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 69
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
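# CycleDiffusion edits a source image toward a target prompt while a `source_prompt`
# describes the input, so every test below supplies both prompts plus an image.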
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def a__ ( self ) -> Optional[Any]:
A: Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(A , """half""" ):
A: Optional[int] = module.half()
A: int = CycleDiffusionPipeline(**A )
A: int = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
A: int = self.get_dummy_inputs(A )
A: Union[str, Any] = pipe(**A )
A: Union[str, Any] = output.images
A: List[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A: str = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 135
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
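# A "# Copied from diffusers.<module>.<object>" comment pins the block below it to the
# original definition; this script re-derives that original (applying optional
# "with A->B" renames) and fails or rewrites when the copy has drifted.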
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 295
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
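# Interactive helpers for `accelerate config`: each `_ask_*` function re-prompts until the
# answer parses, and each `_convert_*` function maps a menu index onto its enum value.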
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 295
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCamelCase : Dict = {"""input_ids""": [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e")
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
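# Illustrative usage sketch (not part of the test suite): the src_lang/tgt_lang
# behaviour asserted above is what prepares a translation batch in practice.
# The checkpoint name is the one the tests use; the sentences are arbitrary.
def _example_prepare_translation_batch():
    tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
    # input_ids start with the source language code and end with eos (id 2),
    # mirroring the prefix/suffix token assertions in the tests above.
    return batch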
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ : Tuple = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
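# Minimal sketch (assumes the CompVis checkpoint used above is reachable): the
# seeded-generator pattern these tests rely on is what makes pipeline outputs
# comparable against stored reference slices across runs.
def _example_seeded_inference():
    pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
    generator = torch.manual_seed(0)
    return pipe(
        "A painting of a squirrel eating a burger",
        generator=generator,
        num_inference_steps=2,
        output_type="numpy",
    ).images[0]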
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
# here we list all keys to be renamed (original name on the left, our name on the right)
_SCREAMING_SNAKE_CASE : List[str] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
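# Toy illustration of how the (old, new) pairs from create_rename_keys() are
# consumed; the two-entry state dict below is hypothetical, for demonstration only.
def _example_rename():
    toy_state_dict = {"input_proj.weight": 0, "input_proj.bias": 1}
    rename_key(toy_state_dict, "input_proj.weight", "input_projection.weight")
    assert "input_projection.weight" in toy_state_dict
    assert "input_proj.weight" not in toy_state_dict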
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
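# The slicing above splits torch's fused in_proj parameters (3 * hidden_size
# rows for query/key/value, with hidden_size = 256 here) into separate
# projections. A sketch of the same split on dummy data:
def _example_qkv_split():
    in_proj_weight = torch.randn(768, 256)  # 3 * 256 rows, one block per projection
    q, k, v = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]
    return q.shape, k.shape, v.shape  # each torch.Size([256, 256])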
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"""Converting model {model_name}...""")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"""nielsr/{model_name}""")
        processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Generates a Bezier curve, a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Returns the value of each basis (Bernstein) polynomial at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Produces the (x, y) value of the Bezier curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plots the Bezier curve using matplotlib plotting capabilities."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
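    # Quick numeric sanity check (sketch): a degree-1 curve is a straight
    # segment, so t = 0.5 gives the midpoint of the two control points.
    assert BezierCurve([(1, 1), (3, 3)]).bezier_curve_function(0.5) == (2.0, 2.0)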
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of this node and all following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Creates a linked list from the given sequence and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    # set the first element as the head
    head = Node(elements_list[0])
    current = head
    # append the remaining elements in order
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the linked list in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
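# Expected output of main() (sketch), given the list [14, 52, 14, 12, 43]:
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14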
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
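# Usage sketch: thanks to _LazyModule, `from transformers.models.sew import
# SEWModel` imports the torch-backed modeling file only on first access, so a
# bare `import transformers` stays cheap even when torch is installed.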
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
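# Hand-checked example (sketch): perfectly inverted binary predictions give the
# minimum MCC, e.g. matthews_corrcoef([0, 1, 0, 1], [1, 0, 1, 0]) == -1.0, which
# is the sklearn call this metric wraps (references first, predictions second).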
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128,
        max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size, is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels,
        choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for models that need extra labels during training
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
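# Usage sketch: dep_version_check("tokenizers") raises with a descriptive error
# if the installed tokenizers version falls outside the range pinned in
# dependency_versions_table.py; the optional `hint` customizes that message.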
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
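

if __name__ == "__main__":
    # Usage sketch; the model id and placeholder name are illustrative assumptions,
    # not something the class above prescribes.
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before tokenization.
    print(tokenizer("a photo of <cat-toy>", vector_shuffle=True))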
| 701
|
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
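
# Padding-behavior sketch (illustrative; this module uses relative imports, so in
# practice one would go through the public export):
#   import numpy as np
#   from transformers import Swin2SRImageProcessor
#   batch = Swin2SRImageProcessor(pad_size=8)(np.random.rand(3, 17, 21), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 24, 24): 17 and 21 are padded up to the next
#   multiple of 8 (an exact multiple still gains one full extra block, per the formula above).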
| 650
| 0
|
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 38
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the dataset dict into its feature matrix and target vector.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
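
# Note: normalize="true" row-normalizes the confusion matrix, so each row shows the
# fraction of samples of that true class assigned to each predicted label.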
| 45
| 0
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # Map each choice's string representation back to the actual value, so a single
    # argument can accept multiple value types.
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
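

if __name__ == "__main__":
    # Minimal usage sketch; the dataclass and flags below are illustrative assumptions,
    # not part of the parser itself.
    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = dataclasses.field(default=3e-4, metadata={"help": "peak learning rate"})
        use_fp16: bool = False  # booleans get a --use_fp16 flag (plus a --no_* twin when the default is True)

    parser = HfArgumentParser(ExampleArguments)
    (example_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--use_fp16"])
    print(example_args)  # ExampleArguments(learning_rate=0.001, use_fp16=True)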
| 214
|
import argparse
import json
from typing import List

from ltp import LTP
from transformers import BertTokenizer


def _is_chinese_char(cp):
    # Checks whether cp is the codepoint of a CJK character.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
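
# Invocation sketch -- every path below is a placeholder:
#   python prepare_chinese_ref.py \
#       --file_name=./data/train.txt \
#       --ltp=./resources/ltp \
#       --bert=bert-base-chinese \
#       --save_path=./data/ref.txt
# The output is one JSON list per input line, giving the positions of "##"-prefixed
# Chinese subword tokens for downstream whole-word masking.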
| 214
| 1
|
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 233
|
def check_bouncy(n: int) -> bool:
    # A number is bouncy when its digits are neither monotonically increasing
    # nor monotonically decreasing.
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 233
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
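
# Instantiation sketch (illustrative; this module uses relative imports, so in
# practice one would go through the public diffusers export):
#   import torch
#   from diffusers import UNet1DModel
#   model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#   out = model(torch.randn(1, 2, 256), timestep=1).sample  # same shape as the input sample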
| 193
|
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
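
# Usage sketch (illustrative): this builder backs the public `load_dataset("parquet", ...)`
# path in the `datasets` library, e.g.:
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"})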
| 193
| 1
|