from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Pipeline that answers free-form questions about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input already carries both the image and the question
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
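A minimal usage sketch for the pipeline above; the checkpoint name is an assumption for illustration, not taken from this file:

# Hypothetical usage sketch; "dandelin/vilt-b32-finetuned-vqa" is an assumed VQA checkpoint.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
)
print(preds)  # list of {"score": float, "answer": str}, highest score first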
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [get_openlibrary_data(author["key"])["name"] for author in data["Authors"]]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python SHA-256 following the standard preprocess/compress pipeline."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09_E667, 0xBB67_AE85, 0x3C6E_F372, 0xA54F_F53A,
            0x510E_527F, 0x9B05_688C, 0x1F83_D9AB, 0x5BE0_CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A_2F98, 0x7137_4491, 0xB5C0_FBCF, 0xE9B5_DBA5, 0x3956_C25B, 0x59F1_11F1, 0x923F_82A4, 0xAB1C_5ED5,
            0xD807_AA98, 0x1283_5B01, 0x2431_85BE, 0x550C_7DC3, 0x72BE_5D74, 0x80DE_B1FE, 0x9BDC_06A7, 0xC19B_F174,
            0xE49B_69C1, 0xEFBE_4786, 0x0FC1_9DC6, 0x240C_A1CC, 0x2DE9_2C6F, 0x4A74_84AA, 0x5CB0_A9DC, 0x76F9_88DA,
            0x983E_5152, 0xA831_C66D, 0xB003_27C8, 0xBF59_7FC7, 0xC6E0_0BF3, 0xD5A7_9147, 0x06CA_6351, 0x1429_2967,
            0x27B7_0A85, 0x2E1B_2138, 0x4D2C_6DFC, 0x5338_0D13, 0x650A_7354, 0x766A_0ABB, 0x81C2_C92E, 0x9272_2C85,
            0xA2BF_E8A1, 0xA81A_664B, 0xC24B_8B70, 0xC76C_51A3, 0xD192_E819, 0xD699_0624, 0xF40E_3585, 0x106A_A070,
            0x19A4_C116, 0x1E37_6C08, 0x2748_774C, 0x34B0_BCB5, 0x391C_0CB3, 0x4ED8_AA4A, 0x5B9C_CA4F, 0x682E_6FF3,
            0x748F_82EE, 0x78A5_636F, 0x84C8_7814, 0x8CC7_0208, 0x90BE_FFFA, 0xA450_6CEB, 0xBEF9_A3F7, 0xC671_78F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with 0x80, zeros, and the 64-bit big-endian message length
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Split the padded message into 64-byte blocks
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # Add 48 zero-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # Modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # Hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
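As a quick sanity check, the pure-Python digest should agree with the standard library, which is the same comparison the unit test above makes:

# Usage sketch, mirroring the unit test above.
import hashlib

message = b"Test String"
assert SHA256(message).hash == hashlib.sha256(message).hexdigest()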
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
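For context, this reader backs the public text loader; a sketch of the equivalent user-facing call, where "train.txt" is a placeholder local file:

# Illustrative only; "train.txt" is a hypothetical path.
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "train.txt"}, split="train")
print(dataset[0])  # {"text": "<first line of train.txt>"}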
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Build an undirected adjacency list: each edge is recorded in both directions
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
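A sketch of the behaviour these tests exercise; the archive path and cache directory are placeholders, not taken from the tests:

# Illustrative sketch of cached_path with on-the-fly extraction.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

download_config = DownloadConfig(cache_dir="./cache", extract_compressed_file=True)
extracted = cached_path("data.txt.gz", download_config=download_config)  # placeholder archive
print(extracted)  # path of the extracted file inside the cache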
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
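The custom-timesteps API that the last few tests exercise can be sketched on its own, assuming only a diffusers install:

# Sketch of the custom-timesteps API checked above.
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be descending
print(scheduler.timesteps)  # tensor([100, 87, 50, 1, 0])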
"""simple docstring""" import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A : Dict = '''▁''' __A : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _a ( __UpperCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = BertGenerationTokenizer UpperCamelCase__ = False UpperCamelCase__ = True def lowercase__ ( self : Dict )->str: super().setUp() _UpperCAmelCase = BertGenerationTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : int )->Union[str, Any]: _UpperCAmelCase = '''<s>''' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1_0_0_2 ) def lowercase__ ( self : Optional[Any] )->str: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = BertGenerationTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) _UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowercase__ ( self : Optional[int] )->Optional[int]: return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def lowercase__ ( self : Dict )->List[str]: _UpperCAmelCase = '''Hello World!''' _UpperCAmelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__SCREAMING_SNAKE_CASE , 
self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) _UpperCAmelCase = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @require_torch @slow def lowercase__ ( self : Dict )->Union[str, Any]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence _UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0] _UpperCAmelCase = ''' '''.join(__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.big_tokenizer.encode_plus(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , return_token_type_ids=__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BertGenerationConfig() _UpperCAmelCase = BertGenerationEncoder(__SCREAMING_SNAKE_CASE ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__SCREAMING_SNAKE_CASE ) model(**__SCREAMING_SNAKE_CASE ) @slow def lowercase__ ( self : Any )->str: _UpperCAmelCase = {'''input_ids''': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
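A short usage sketch of the tokenizer under test; the checkpoint name and the expected ids come directly from the slow tests above:

from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
print(tokenizer.encode("Hello World!"))  # [18536, 2260, 101], per the test above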
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Pipeline that generates segmentation masks for an image, batching grid points through the model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
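A hedged usage sketch of this pipeline; "facebook/sam-vit-base" is an assumed SAM checkpoint, not taken from this file:

# Hypothetical usage; the checkpoint name is an assumption.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(len(outputs["masks"]), outputs["scores"][:3])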
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
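A direct call equivalent to the CLI above; all paths are placeholders for illustration:

# Placeholder paths; equivalent to invoking the script with the flags above.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/bigbird.ckpt",
    big_bird_config_file="/path/to/config.json",
    pytorch_dump_path="/path/to/output",
    is_trivia_qa=False,
)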
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer if the passed options differ from the serialized state
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
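A usage sketch; the checkpoint name appears in the vocabulary map above:

from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
encoding = tokenizer("ConvBERT uses span-based dynamic convolution.")
print(encoding.input_ids)  # [CLS] ... [SEP] ids, built as in build_inputs_with_special_tokens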
"""simple docstring""" from timeit import timeit def lowercase_ ( _snake_case ): if number < 0: raise ValueError("""the value of input must not be negative""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 while number: number &= number - 1 result += 1 return result def lowercase_ ( _snake_case ): if number < 0: raise ValueError("""the value of input must not be negative""" ) SCREAMING_SNAKE_CASE__ : List[str] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def lowercase_ ( ): def do_benchmark(_snake_case ) -> None: SCREAMING_SNAKE_CASE__ : Optional[Any] = """import __main__ as z""" print(f'''Benchmark when {number = }:''' ) print(f'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' ) SCREAMING_SNAKE_CASE__ : Any = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" ,setup=_UpperCAmelCase ) print(f'''timeit() runs in {timing} seconds''' ) print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = timeit( """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" ,setup=_UpperCAmelCase ,) print(f'''timeit() runs in {timing} seconds''' ) for number in (25, 37, 58, 0): do_benchmark(_UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Keras ships one class per variant; the original listing repeated
# "EfficientNetBa" for every entry, which does not exist.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": []},
    "b1": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16]},
    "b2": {"hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16]},
    "b3": {"hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18]},
    "b4": {"hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6]},
    "b5": {"hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27]},
    "b6": {"hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31]},
    "b7": {"hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18]},
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append((f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
        rename_keys.append((f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
        rename_keys.append((f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append((f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
        rename_keys.append((f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append((f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append((f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
        rename_keys.append((f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # the classifier key names were stripped in this copy; "predictions" is the
    # name Keras gives the top Dense layer, so these two entries are reconstructed
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
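Once the script has run with --save_model, the converted checkpoint loads back through the standard from_pretrained path. A minimal sanity-check sketch (the "hf_model" folder is this script's default dump path; the test image is the same COCO cats picture the script uses):

import requests
import torch
from PIL import Image

from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor

processor = EfficientNetImageProcessor.from_pretrained("hf_model")
model = EfficientNetForImageClassification.from_pretrained("hf_model").eval()

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
img = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=img, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])  # expect a cat-like ImageNet class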
49
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class a_ ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , A , A ) -> Dict: _SCREAMING_SNAKE_CASE = params _SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = np.array([len(__SCREAMING_SNAKE_CASE ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A ) -> List[Any]: return (self.token_ids[index], self.lengths[index]) def __len__( self ) -> Optional[Any]: return len(self.lengths ) def snake_case_( self ) -> Any: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.params.max_model_input_size _SCREAMING_SNAKE_CASE = self.lengths > max_len logger.info(f'Splitting {sum(__SCREAMING_SNAKE_CASE )} too long sequences.' ) def divide_chunks(A , A ): return [l[i : i + n] for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] if self.params.mlm: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: _SCREAMING_SNAKE_CASE = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: _SCREAMING_SNAKE_CASE = np.insert(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE ) if sub_s[-1] != sep_id: _SCREAMING_SNAKE_CASE = np.insert(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) assert len(__SCREAMING_SNAKE_CASE ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__SCREAMING_SNAKE_CASE ) new_tok_ids.extend(__SCREAMING_SNAKE_CASE ) new_lengths.extend([len(__SCREAMING_SNAKE_CASE ) for l in sub_seqs] ) _SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ) def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = self.lengths > 11 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' ) def snake_case_( self ) -> Tuple: if "unk_token" not in self.params.special_tok_ids: return else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = len(self ) _SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) _SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5 _SCREAMING_SNAKE_CASE = self.token_ids[indices] _SCREAMING_SNAKE_CASE = self.lengths[indices] _SCREAMING_SNAKE_CASE = len(self ) logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' 
) def snake_case_( self ) -> List[Any]: if not self.params.is_master: return logger.info(f'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def snake_case_( self , A ) -> List[Any]: _SCREAMING_SNAKE_CASE = [t[0] for t in batch] _SCREAMING_SNAKE_CASE = [t[1] for t in batch] assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) # Max for paddings _SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE ) # Pad token ids if self.params.mlm: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""] else: _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""] _SCREAMING_SNAKE_CASE = [list(t.astype(__SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(__SCREAMING_SNAKE_CASE )) for t in token_ids] assert len(tk_ ) == len(__SCREAMING_SNAKE_CASE ) assert all(len(__SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ ) _SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_) _SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ) # (bs) return tk_t, lg_t
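A hedged usage sketch for the dataset class above (named LmSeqsDataset in the upstream distillation example, with batch_sequences as its collate method; the params namespace below only fakes the fields the class actually reads, and the token data is a toy array rather than real pretraining data):

from types import SimpleNamespace

import numpy as np
from torch.utils.data import DataLoader

params = SimpleNamespace(
    mlm=True,
    max_model_input_size=128,
    is_master=True,
    special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
)
# 32 toy sequences: [CLS] + twenty dummy tokens + [SEP], long enough to survive
# the <=11-token filter above
data = [np.array([101] + [5] * 20 + [102]) for _ in range(32)]

dataset = LmSeqsDataset(params=params, data=data)
loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # shapes: (bs, max_seq_len_), (bs,)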
58
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
49
0
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer UpperCAmelCase__ : List[str] = flax_key_tuple[:-1] + ("""weight""",) UpperCAmelCase__ : Tuple = torch.permute(_UpperCAmelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase ): # linear layer UpperCAmelCase__ : Dict = flax_key_tuple[:-1] + ("""weight""",) UpperCAmelCase__ : Union[str, Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCAmelCase__ : str = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): if "metadata" in layer: UpperCAmelCase__ : Optional[Any] = layer.split("""metadata""" ) UpperCAmelCase__ : List[str] = """""".join(split_layer[0] )[:-1] UpperCAmelCase__ : Optional[int] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: UpperCAmelCase__ : Union[str, Any] = layer.split("""kvstore""" ) UpperCAmelCase__ : str = """""".join(split_layer[0] )[:-1] UpperCAmelCase__ : int = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: UpperCAmelCase__ : Optional[int] = layer.split("""/""" ) UpperCAmelCase__ : Tuple = """/""".join(split_layer[:-1] ) UpperCAmelCase__ : List[Any] = (split_layer[-1],) if "kvstore/path" in layer: UpperCAmelCase__ : Tuple = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: UpperCAmelCase__ : int = """file""" else: UpperCAmelCase__ : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase__ : int = rename_keys(_UpperCAmelCase ) UpperCAmelCase__ : Optional[Any] = {} for k, v in current_block.items(): UpperCAmelCase__ : Dict = v UpperCAmelCase__ : int = new_current_block torch.save(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = WEIGHTS_NAME ): UpperCAmelCase__ : Optional[Any] = convert_file_size_to_int(_UpperCAmelCase ) UpperCAmelCase__ : Dict = [] UpperCAmelCase__ : Any = {} UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ : Optional[int] = 0 os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: UpperCAmelCase__ : Union[str, Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] UpperCAmelCase__ : List[str] = flatten_dict(_UpperCAmelCase , sep="""/""" ) UpperCAmelCase__ : Optional[Any] = {} for layer in checkpoint_info.keys(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = get_key_and_tensorstore_dict( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if curr_real_layer_name in all_layers: UpperCAmelCase__ : List[Any] = content else: UpperCAmelCase__ : Optional[Any] = {split_layer[-1]: content} for key in 
all_layers.keys(): # open tensorstore file UpperCAmelCase__ : Optional[int] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() UpperCAmelCase__ : str = torch.tensor(_UpperCAmelCase ) UpperCAmelCase__ : Tuple = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , _UpperCAmelCase ) UpperCAmelCase__ : int = """/""".join(_UpperCAmelCase ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: UpperCAmelCase__ : List[Any] = os.path.join( _UpperCAmelCase , weights_name.replace(""".bin""" , f'''-{len(_UpperCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block UpperCAmelCase__ : int = {} UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : str = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block UpperCAmelCase__ : Optional[int] = os.path.join(_UpperCAmelCase , weights_name.replace(""".bin""" , f'''-{len(_UpperCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(_UpperCAmelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index UpperCAmelCase__ : Any = {} UpperCAmelCase__ : Optional[int] = {} for idx, shard in enumerate(_UpperCAmelCase ): UpperCAmelCase__ : int = weights_name.replace( """.bin""" , f'''-{idx+1:05d}-of-{len(_UpperCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d} UpperCAmelCase__ : List[Any] = os.path.join(_UpperCAmelCase , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) UpperCAmelCase__ : Any = shard for key in shard: UpperCAmelCase__ : Union[str, Any] = shard_file # Add the metadata UpperCAmelCase__ : List[str] = {"""total_size""": total_size} UpperCAmelCase__ : Any = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase__ : List[Any] = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + """\n""" f.write(_UpperCAmelCase ) return metadata, index if __name__ == "__main__": __A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. 
Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
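The shard boundary logic above rests on two small helpers imported from transformers; a quick sketch of what they do (sizes here are illustrative):

import torch
from transformers.modeling_utils import dtype_byte_size
from transformers.utils.hub import convert_file_size_to_int

max_shard_size = convert_file_size_to_int("10GB")  # "10GB" -> 10 * 10**9 bytes
weight = torch.zeros(4096, 4096, dtype=torch.bfloat16)
weight_size = weight.numel() * dtype_byte_size(weight.dtype)  # bf16 = 2 bytes per element

# the loop above opens a new shard whenever the running block size would cross
# this budget, then renames the shard files once the final count is known
print(weight_size, max_shard_size)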
163
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
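A few example calls for the function above:

assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("This is a string", "Is this a string") is True
assert check_anagrams("There", "Their") is False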
49
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
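Instantiating the config (and a model from it) follows the usual transformers pattern:

from transformers import MarkupLMConfig, MarkupLMModel

configuration = MarkupLMConfig()       # microsoft/markuplm-base style defaults
model = MarkupLMModel(configuration)   # randomly initialised weights
print(configuration.xpath_unit_hidden_size)  # 32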
39
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
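A short sketch of the extra movement-pruning knobs this config adds on top of a plain BERT config, using the class as defined above:

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.pruning_method, config.mask_init, config.mask_scale)  # topK constant 0.0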
49
0
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline lowerCAmelCase__ = { '''n_samples''': 64, '''horizon''': 32, '''num_inference_steps''': 20, '''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network '''scale_grad_by_std''': True, '''scale''': 0.1, '''eta''': 0.0, '''t_grad_cutoff''': 2, '''device''': '''cpu''', } if __name__ == "__main__": lowerCAmelCase__ = '''hopper-medium-v2''' lowerCAmelCase__ = gym.make(env_name) lowerCAmelCase__ = ValueGuidedRLPipeline.from_pretrained( '''bglick13/hopper-medium-v2-value-function-hor32''', env=env, ) env.seed(0) lowerCAmelCase__ = env.reset() lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 1_000 lowerCAmelCase__ = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy lowerCAmelCase__ = pipeline(obs, planning_horizon=32) # execute action in environment lowerCAmelCase__ = env.step(denorm_actions) lowerCAmelCase__ = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' f''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) lowerCAmelCase__ = next_observation except KeyboardInterrupt: pass print(f'''Total reward: {total_reward}''')
153
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    # field names reconstructed from `datasets.DownloadConfig`, which this
    # dataclass matches field-for-field (the original names were stripped)
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
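Because copy() deep-copies every field, mutating the clone leaves the original configuration untouched:

base = DownloadConfig(max_retries=1)
patched = base.copy()
patched.max_retries = 5
assert base.max_retries == 1 and patched.max_retries == 5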
49
0
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
6
from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :Union[str, Any] = logging.get_logger(__name__) __snake_case :Any = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[int] = '''switch_transformers''' UpperCamelCase__ : Optional[Any] = ['''past_key_values'''] UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' __a = vocab_size __a = d_model __a = d_kv __a = d_ff __a = num_sparse_encoder_layers __a = num_layers __a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __a = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __a = self.num_layers // self.num_sparse_encoder_layers else: __a = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: __a = self.num_decoder_layers // self.num_sparse_decoder_layers else: __a = self.num_decoder_layers # HACK: this will create 0 sparse layers __a = num_heads __a = num_experts __a = expert_capacity __a = router_bias __a = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}') __a = router_dtype __a = router_ignore_padding_tokens __a = relative_attention_num_buckets __a = relative_attention_max_distance __a = dropout_rate __a = layer_norm_epsilon __a = initializer_factor __a = feed_forward_proj __a = use_cache __a = add_router_probs __a = router_z_loss_coef __a = router_aux_loss_coef __a = self.feed_forward_proj.split('''-''') __a = act_info[-1] __a = act_info[0] == '''gated''' if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''') # for backwards compatibility if feed_forward_proj == "gated-gelu": __a = '''gelu_new''' super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
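In the released SwitchTransformersConfig (which the class above corresponds to), the parsed activation pieces land in dense_act_fn and is_gated_act, so the backward-compatibility branch is observable directly:

from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(feed_forward_proj="gated-gelu")
print(config.is_gated_act)   # True: the "gated" prefix was detected
print(config.dense_act_fn)   # "gelu_new": rewritten for backward compatibility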
49
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case__ : """simple docstring""" def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> int: """simple docstring""" a__ : Optional[Any] = parent a__ : str = batch_size a__ : List[str] = is_training a__ : Any = use_auxiliary_loss a__ : str = num_queries a__ : Optional[Any] = num_channels a__ : Optional[int] = min_size a__ : Optional[int] = max_size a__ : Optional[int] = num_labels a__ : Dict = mask_feature_size def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : int = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __SCREAMING_SNAKE_CASE ) a__ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) a__ : int = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) > 0.5 ).float() a__ : str = (torch.rand((self.batch_size, self.num_labels) , device=__SCREAMING_SNAKE_CASE ) > 0.5).long() a__ : Tuple = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ , a__ , a__ , a__ , a__ : List[Any] = self.prepare_config_and_inputs() a__ : Tuple = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> List[Any]: """simple docstring""" a__ : List[str] = output.encoder_hidden_states a__ : Optional[Any] = output.pixel_decoder_hidden_states a__ : List[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int: """simple docstring""" with torch.no_grad(): a__ : Optional[Any] = MaskFormerModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() a__ : 
Union[str, Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE ) a__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]: """simple docstring""" a__ : Any = MaskFormerForInstanceSegmentation(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a__ : List[str] = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE ) a__ : List[Any] = model(__SCREAMING_SNAKE_CASE ) comm_check_on_output(__SCREAMING_SNAKE_CASE ) a__ : List[Any] = model( pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ) comm_check_on_output(__SCREAMING_SNAKE_CASE ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case__ (__UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __lowerCAmelCase :Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowerCAmelCase :Any = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowerCAmelCase :Optional[Any] = False __lowerCAmelCase :Any = False __lowerCAmelCase :Union[str, Any] = False __lowerCAmelCase :Dict = False def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : Dict = MaskFormerModelTester(self ) a__ : Optional[int] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: 
"""simple docstring""" a__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ , a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : str = model_class(__SCREAMING_SNAKE_CASE ) a__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Tuple = [*signature.parameters.keys()] a__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a__ : Tuple = MaskFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : Optional[Any] = (self.model_tester.min_size,) * 2 a__ : int = { """pixel_values""": torch.randn((2, 3, *size) , device=__SCREAMING_SNAKE_CASE ), """mask_labels""": torch.randn((2, 1_0, *size) , device=__SCREAMING_SNAKE_CASE ), """class_labels""": torch.zeros(2 , 1_0 , device=__SCREAMING_SNAKE_CASE ).long(), } a__ : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__SCREAMING_SNAKE_CASE ) a__ : int = model(**__SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) a__ : int = model(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a__ : Union[str, Any] = self.all_model_classes[1] a__ , a__ , a__ , a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs() a__ : str = 
model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.train() a__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ).loss loss.backward() def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Optional[int] = self.all_model_classes[1] a__ , a__ , a__ , a__ , a__ : str = self.model_tester.prepare_config_and_inputs() a__ : Union[str, Any] = True a__ : Dict = True a__ : Any = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.train() a__ : List[Any] = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ) a__ : Any = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a__ : Optional[Any] = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a__ : Optional[int] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a__ : int = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase : int =1E-4 def lowerCAmelCase_ ( ) -> List[Any]: """simple docstring""" a__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") return image @require_vision @slow class snake_case__ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : int = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__SCREAMING_SNAKE_CASE ) a__ : int = self.default_image_processor a__ : Any = prepare_img() a__ : List[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) a__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): a__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) a__ : Tuple = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) a__ : Any = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) a__ : Tuple = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE__( self ) -> 
str: """simple docstring""" a__ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__SCREAMING_SNAKE_CASE ) .eval() ) a__ : Optional[Any] = self.default_image_processor a__ : List[str] = prepare_img() a__ : Union[str, Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) a__ : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): a__ : List[Any] = model(**__SCREAMING_SNAKE_CASE ) # masks_queries_logits a__ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a__ : int = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a__ : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) # class_queries_logits a__ : Optional[int] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a__ : Union[str, Any] = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" a__ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__SCREAMING_SNAKE_CASE ) .eval() ) a__ : List[str] = self.default_image_processor a__ : str = prepare_img() a__ : str = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE ) a__ : int = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): a__ : Tuple = model(**__SCREAMING_SNAKE_CASE ) # masks_queries_logits a__ : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a__ : str = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) # class_queries_logits a__ : int = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a__ : List[str] = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE 
, atol=__SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" a__ : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__SCREAMING_SNAKE_CASE ) .eval() ) a__ : int = self.default_image_processor a__ : int = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , ) a__ : str = inputs["""pixel_values"""].to(__SCREAMING_SNAKE_CASE ) a__ : int = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["""mask_labels"""]] a__ : Optional[Any] = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["""class_labels"""]] with torch.no_grad(): a__ : Tuple = model(**__SCREAMING_SNAKE_CASE ) self.assertTrue(outputs.loss is not None )
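A hedged end-to-end sketch of what the integration tests above exercise, using the public checkpoint and the processor's post-processing API:

import torch
from PIL import Image

from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=img, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# per-pixel class ids, resized back to the original resolution
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[img.size[::-1]])[0]
print(semantic_map.shape)  # (height, width)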
170
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex __snake_case :List[Any] = logging.getLogger(__name__) class _A : def __init__( self : List[str]): '''simple docstring''' __a = False def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' if not self.initialized: __a = RagRetriever( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , ) __a = True def _lowerCamelCase ( self : List[str]): '''simple docstring''' self.retriever.index.init_index() def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return doc_ids, retrieved_doc_embeds class _A ( __UpperCAmelCase ): def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None): '''simple docstring''' if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0: raise ValueError( '''When using Ray for distributed fine-tuning, ''' '''you\'ll need to provide the paths instead, ''' '''as the dataset and the index are loaded ''' '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''') super().__init__( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , ) __a = retrieval_workers if len(self.retrieval_workers) > 0: ray.get( [ worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for worker in self.retrieval_workers ]) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' logger.info('''initializing retrieval''') if len(self.retrieval_workers) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' if len(self.retrieval_workers) > 0: # Select a random retrieval actor. 
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)] __a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) else: __a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE) @classmethod def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) @classmethod def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' __a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE) __a = rag_tokenizer.question_encoder __a = rag_tokenizer.generator if indexed_dataset is not None: __a = '''custom''' __a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE) else: __a = cls._build_index(__SCREAMING_SNAKE_CASE) return cls( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
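A loose sketch of how the RAG fine-tuning example wires this retriever up with Ray; the worker count and checkpoint name are illustrative, the class names are the upstream ones (RayRetriever, RagRayDistributedRetriever), and the actor handles are passed positionally as in the classmethod above:

import ray

ray.init()
workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
retriever.init_retrieval()  # each actor loads the index once, up front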
49
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
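PipelineTool instances are callable, so using the captioner is a one-liner once instantiated (the image path and the printed caption are illustrative):

from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))
print(caption)  # e.g. "two cats sleeping on a couch"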
229
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
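A typical invocation with placeholder paths (the script filename follows the transformers repo; adjust to wherever this file lives):

# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#     --big_bird_config_file /path/to/config.json \
#     --pytorch_dump_path ./bigbird-converted \
#     --is_trivia_qa
#
# The resulting folder then loads with
# BigBirdForQuestionAnswering.from_pretrained("./bigbird-converted").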
49
0
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger _SCREAMING_SNAKE_CASE = get_logger(__name__) class SCREAMING_SNAKE_CASE_ : def __init__( self : List[str] , _A : Optional[str] = None ) -> Dict: """simple docstring""" snake_case_ : Any = ( os.path.join(__SCREAMING_SNAKE_CASE , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) snake_case_ : int = Extractor def UpperCAmelCase_ ( self : List[Any] , _A : str ) -> Optional[int]: """simple docstring""" from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" snake_case_ : Optional[int] = os.path.abspath(__SCREAMING_SNAKE_CASE ) return os.path.join(self.extract_dir , hash_url_to_filename(__SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase_ ( self : Optional[Any] , _A : str , _A : bool ) -> Optional[int]: """simple docstring""" return force_extract or ( not os.path.isfile(__SCREAMING_SNAKE_CASE ) and not (os.path.isdir(__SCREAMING_SNAKE_CASE ) and os.listdir(__SCREAMING_SNAKE_CASE )) ) def UpperCAmelCase_ ( self : str , _A : str , _A : bool = False ) -> Any: """simple docstring""" snake_case_ : Dict = self.extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE ) if not extractor_format: return input_path snake_case_ : Optional[Any] = self._get_output_path(__SCREAMING_SNAKE_CASE ) if self._do_extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return output_path class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls : Dict , _A : Union[Path, str] , **_A : Tuple ) -> int: """simple docstring""" ... @staticmethod @abstractmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Tuple: """simple docstring""" ... 
class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase , __UpperCAmelCase ): __magic_name__: List[bytes] = [] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : int ) -> Any: """simple docstring""" with open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: return f.read(__SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase_ ( cls : Any , _A : Union[Path, str] , _A : bytes = b"" ) -> List[str]: """simple docstring""" if not magic_number: snake_case_ : int = max(len(__SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers ) try: snake_case_ : Any = cls.read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except OSError: return False return any(magic_number.startswith(__SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers ) class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): @classmethod def UpperCAmelCase_ ( cls : Optional[int] , _A : Union[Path, str] , **_A : List[Any] ) -> int: """simple docstring""" return tarfile.is_tarfile(__SCREAMING_SNAKE_CASE ) @staticmethod def UpperCAmelCase_ ( _A : Optional[Any] , _A : Tuple ) -> List[Any]: """simple docstring""" def resolved(_A : str ) -> str: return os.path.realpath(os.path.abspath(__SCREAMING_SNAKE_CASE ) ) def badpath(_A : str , _A : str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ).startswith(__SCREAMING_SNAKE_CASE ) def badlink(_A : str , _A : str ) -> bool: # Links are interpreted relative to the directory containing the link snake_case_ : Optional[Any] = resolved(os.path.join(__SCREAMING_SNAKE_CASE , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=__SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = resolved(__SCREAMING_SNAKE_CASE ) for finfo in members: if badpath(finfo.name , __SCREAMING_SNAKE_CASE ): logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" ) elif finfo.issym() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" ) elif finfo.islnk() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" ) else: yield finfo @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> List[str]: """simple docstring""" os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) snake_case_ : List[Any] = tarfile.open(__SCREAMING_SNAKE_CASE ) tar_file.extractall(__SCREAMING_SNAKE_CASE , members=TarExtractor.safemembers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) tar_file.close() class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: Dict = [B'''\x1F\x8B'''] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Optional[int]: """simple docstring""" with gzip.open(__SCREAMING_SNAKE_CASE , 'rb' ) as gzip_file: with open(__SCREAMING_SNAKE_CASE , 'wb' ) as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: Dict = [ B'''PK\x03\x04''', B'''PK\x05\x06''', # empty archive B'''PK\x07\x08''', # spanned archive ] @classmethod def UpperCAmelCase_ ( cls : Tuple , _A : Union[Path, str] , _A : bytes = b"" ) -> str: """simple docstring""" if super().is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but 
misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(__SCREAMING_SNAKE_CASE , 'rb' ) as fp: snake_case_ : Dict = _EndRecData(__SCREAMING_SNAKE_CASE ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: snake_case_ : Any = fp.read(__SCREAMING_SNAKE_CASE ) # CD is where we expect it to be if len(__SCREAMING_SNAKE_CASE ) == sizeCentralDir: snake_case_ : Dict = struct.unpack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Optional[Any]: """simple docstring""" os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , 'r' ) as zip_file: zip_file.extractall(__SCREAMING_SNAKE_CASE ) zip_file.close() class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: Optional[Any] = [B'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Tuple: """simple docstring""" with lzma.open(__SCREAMING_SNAKE_CASE ) as compressed_file: with open(__SCREAMING_SNAKE_CASE , 'wb' ) as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: int = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> List[str]: """simple docstring""" if not config.RARFILE_AVAILABLE: raise ImportError('Please pip install rarfile' ) import rarfile os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) snake_case_ : Dict = rarfile.RarFile(__SCREAMING_SNAKE_CASE ) rf.extractall(__SCREAMING_SNAKE_CASE ) rf.close() class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: List[Any] = [B'''\x28\xb5\x2F\xFD'''] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> str: """simple docstring""" if not config.ZSTANDARD_AVAILABLE: raise ImportError('Please pip install zstandard' ) import zstandard as zstd snake_case_ : List[Any] = zstd.ZstdDecompressor() with open(__SCREAMING_SNAKE_CASE , 'rb' ) as ifh, open(__SCREAMING_SNAKE_CASE , 'wb' ) as ofh: dctx.copy_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: str = [B'''\x42\x5A\x68'''] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Union[str, Any]: """simple docstring""" with bza.open(__SCREAMING_SNAKE_CASE , 'rb' ) as compressed_file: with open(__SCREAMING_SNAKE_CASE , 'wb' ) as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: Union[str, Any] = 
[B'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Union[str, Any]: """simple docstring""" if not config.PY7ZR_AVAILABLE: raise ImportError('Please pip install py7zr' ) import pyazr os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) with pyazr.SevenZipFile(__SCREAMING_SNAKE_CASE , 'r' ) as archive: archive.extractall(__SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): __magic_name__: int = [B'''\x04\x22\x4D\x18'''] @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : Union[Path, str] ) -> Optional[int]: """simple docstring""" if not config.LZ4_AVAILABLE: raise ImportError('Please pip install lz4' ) import lza.frame with lza.frame.open(__SCREAMING_SNAKE_CASE , 'rb' ) as compressed_file: with open(__SCREAMING_SNAKE_CASE , 'wb' ) as extracted_file: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE_ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __magic_name__: Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls : int ) -> Tuple: """simple docstring""" return max( len(__SCREAMING_SNAKE_CASE ) for extractor in cls.extractors.values() if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( _A : Union[Path, str] , _A : int ) -> Tuple: """simple docstring""" try: return MagicNumberBaseExtractor.read_magic_number(__SCREAMING_SNAKE_CASE , magic_number_length=__SCREAMING_SNAKE_CASE ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls : Optional[Any] , _A : Union[Path, str] , _A : bool = False ) -> int: """simple docstring""" warnings.warn( 'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ' 'Use \'infer_extractor_format\' instead.' 
, category=__SCREAMING_SNAKE_CASE , ) snake_case_ : Optional[int] = cls.infer_extractor_format(__SCREAMING_SNAKE_CASE ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls : Any , _A : Union[Path, str] ) -> Dict: # <Added version="2.4.0"/> """simple docstring""" snake_case_ : Dict = cls._get_magic_number_max_length() snake_case_ : Optional[int] = cls._read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE ): return extractor_format @classmethod def UpperCAmelCase_ ( cls : List[str] , _A : Union[Path, str] , _A : Union[Path, str] , _A : Optional[str] = None , _A : Optional[BaseExtractor] = "deprecated" , ) -> str: """simple docstring""" os.makedirs(os.path.dirname(__SCREAMING_SNAKE_CASE ) , exist_ok=__SCREAMING_SNAKE_CASE ) # Prevent parallel extractions snake_case_ : Union[str, Any] = str(Path(__SCREAMING_SNAKE_CASE ).with_suffix('.lock' ) ) with FileLock(__SCREAMING_SNAKE_CASE ): shutil.rmtree(__SCREAMING_SNAKE_CASE , ignore_errors=__SCREAMING_SNAKE_CASE ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # passed as positional arg warnings.warn( 'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ' 'Use \'extractor_format\' instead.' , category=__SCREAMING_SNAKE_CASE , ) snake_case_ : Any = extractor if extractor != 'deprecated' else extractor_format else: snake_case_ : List[str] = cls.extractors[extractor_format] return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: warnings.warn( 'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ' 'exception in 3.0.0.' , category=__SCREAMING_SNAKE_CASE , ) for extractor in cls.extractors.values(): if extractor.is_extractable(__SCREAMING_SNAKE_CASE ): return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
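# --- Hedged usage sketch for the extraction cache manager defined above ---
# The class names in this corpus are obfuscated, so `ExtractManager` below is
# an assumed stand-in for the first class in the module, and the archive path
# is illustrative.
#
# manager = ExtractManager(cache_dir="/tmp/extracted_datasets")
# out_dir = manager.extract("/data/dump.tar.gz")  # format inferred from the magic number
# # out_dir is a hash-named path under the extraction cache; repeated calls
# # reuse it unless re-extraction is forced.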
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase ) else: __a = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase ) for i, tensor in enumerate(_UpperCAmelCase ): if padding_side == "right": if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = tensor[:sequence_length] else: __a = tensor[:sequence_length] else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = tensor[:sequence_length] else: __a = tensor[:sequence_length] return out_tensor.tolist() def __snake_case ( _UpperCAmelCase ): __a = ord(_UpperCAmelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __a = unicodedata.category(_UpperCAmelCase ) if cat.startswith('''P''' ): return True return False @dataclass class _A ( __UpperCAmelCase ): UpperCamelCase__ : PreTrainedTokenizerBase UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : int = -100 UpperCamelCase__ : str = "pt" def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' import torch __a = '''label''' if '''label''' in features[0].keys() else '''labels''' __a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __a = self.tokenizer.pad( __SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __a = torch.tensor(batch['''entity_ids''']).shape[1] __a = self.tokenizer.padding_side if padding_side == "right": __a = [ list(__SCREAMING_SNAKE_CASE) + [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) for label in labels ] else: __a = [ [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) + list(__SCREAMING_SNAKE_CASE) for label in labels ] __a = [feature['''ner_tags'''] for feature in features] __a = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = [feature['''original_entity_spans'''] for feature in features] __a = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = {k: torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.intaa) for k, v in batch.items()} return batch
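# --- Minimal standalone sketch of the right-side padding performed by
# padding_tensor above (hypothetical name pad_right; assumes the intended
# behavior of writing each sequence into the padded buffer, as in the
# upstream LUKE token-classification example) ---
import numpy as np


def pad_right(tensors, padding_value, sequence_length):
    out = np.full((len(tensors), sequence_length), padding_value)
    for i, t in enumerate(tensors):
        t = t[:sequence_length]  # truncate overly long sequences
        out[i, : len(t)] = t
    return out.tolist()


print(pad_right([[1, 2], [3], [4, 5, 6]], -1, 3))
# [[1, 2, -1], [3, -1, -1], [4, 5, 6]]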
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union __A : Tuple = TypeVar("T") __A : int = Union[List[T], Tuple[T, ...]] __A : Union[str, Any] = Union[T, List[T], Dict[str, T]] __A : str = Union[str, bytes, os.PathLike]
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
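# Hedged usage note: the check above can be run directly once the
# graphs.minimum_spanning_tree_prims module is importable. The expected MST
# has total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.
if __name__ == "__main__":
    test_prim_successful_result()
    print("Prim's MST matches the expected edge set.")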
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class _lowercase ( __UpperCAmelCase): """simple docstring""" def __get__( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=None ): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) lowerCamelCase__ : str = "__cached_" + self.fget.__name__ lowerCamelCase__ : str = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if cached is None: lowerCamelCase__ : int = self.fget(__SCREAMING_SNAKE_CASE ) setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return cached def lowercase_ ( _A : Tuple ): """simple docstring""" lowerCamelCase__ : Tuple = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"invalid truth value {val!r}" ) def lowercase_ ( _A : Optional[Any] ): """simple docstring""" if is_torch_fx_proxy(_UpperCAmelCase ): return True if is_torch_available(): import torch if isinstance(_UpperCAmelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_UpperCAmelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_UpperCAmelCase , (jnp.ndarray, Tracer) ): return True return isinstance(_UpperCAmelCase , np.ndarray ) def lowercase_ ( _A : int ): """simple docstring""" return isinstance(_UpperCAmelCase , np.ndarray ) def lowercase_ ( _A : str ): """simple docstring""" return _is_numpy(_UpperCAmelCase ) def lowercase_ ( _A : Tuple ): """simple docstring""" import torch return isinstance(_UpperCAmelCase , torch.Tensor ) def lowercase_ ( _A : Optional[int] ): """simple docstring""" return False if not is_torch_available() else _is_torch(_UpperCAmelCase ) def lowercase_ ( _A : Optional[Any] ): """simple docstring""" import torch return isinstance(_UpperCAmelCase , torch.device ) def lowercase_ ( _A : Optional[int] ): """simple docstring""" return False if not is_torch_available() else _is_torch_device(_UpperCAmelCase ) def lowercase_ ( _A : Dict ): """simple docstring""" import torch if isinstance(_UpperCAmelCase , _UpperCAmelCase ): if hasattr(_UpperCAmelCase , _UpperCAmelCase ): lowerCamelCase__ : List[str] = getattr(_UpperCAmelCase , _UpperCAmelCase ) else: return False return isinstance(_UpperCAmelCase , torch.dtype ) def lowercase_ ( _A : Dict ): """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(_UpperCAmelCase ) def lowercase_ ( _A : Tuple ): """simple docstring""" import tensorflow as tf return isinstance(_UpperCAmelCase , tf.Tensor ) def lowercase_ ( _A : Tuple ): """simple docstring""" return False if not is_tf_available() else _is_tensorflow(_UpperCAmelCase ) def lowercase_ ( _A : Tuple ): """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_UpperCAmelCase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(_UpperCAmelCase ) return type(_UpperCAmelCase ) == tf.Tensor def 
lowercase_ ( _A : List[str] ): """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCAmelCase ) def lowercase_ ( _A : str ): """simple docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(_UpperCAmelCase , jnp.ndarray ) def lowercase_ ( _A : str ): """simple docstring""" return False if not is_flax_available() else _is_jax(_UpperCAmelCase ) def lowercase_ ( _A : Union[str, Any] ): """simple docstring""" if isinstance(_UpperCAmelCase , (dict, UserDict) ): return {k: to_py_obj(_UpperCAmelCase ) for k, v in obj.items()} elif isinstance(_UpperCAmelCase , (list, tuple) ): return [to_py_obj(_UpperCAmelCase ) for o in obj] elif is_tf_tensor(_UpperCAmelCase ): return obj.numpy().tolist() elif is_torch_tensor(_UpperCAmelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(_UpperCAmelCase ): return np.asarray(_UpperCAmelCase ).tolist() elif isinstance(_UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def lowercase_ ( _A : Optional[Any] ): """simple docstring""" if isinstance(_UpperCAmelCase , (dict, UserDict) ): return {k: to_numpy(_UpperCAmelCase ) for k, v in obj.items()} elif isinstance(_UpperCAmelCase , (list, tuple) ): return np.array(_UpperCAmelCase ) elif is_tf_tensor(_UpperCAmelCase ): return obj.numpy() elif is_torch_tensor(_UpperCAmelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(_UpperCAmelCase ): return np.asarray(_UpperCAmelCase ) else: return obj class _lowercase ( __UpperCAmelCase): """simple docstring""" def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Any = fields(self ) # Safety and consistency checks if not len(__SCREAMING_SNAKE_CASE ): raise ValueError(f"{self.__class__.__name__} has no fields." ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"{self.__class__.__name__} should not have more than one required field." ) lowerCamelCase__ : List[str] = getattr(self , class_fields[0].name ) lowerCamelCase__ : Optional[Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCamelCase__ : Dict = first_field.items() lowerCamelCase__ : Union[str, Any] = True else: try: lowerCamelCase__ : Union[str, Any] = iter(__SCREAMING_SNAKE_CASE ) lowerCamelCase__ : Tuple = True except TypeError: lowerCamelCase__ : Optional[Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__SCREAMING_SNAKE_CASE ): if ( not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) or not len(__SCREAMING_SNAKE_CASE ) == 2 or not isinstance(element[0] , __SCREAMING_SNAKE_CASE ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute lowerCamelCase__ : Dict = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." 
) break setattr(self , element[0] , element[1] ) if element[1] is not None: lowerCamelCase__ : Dict = element[1] elif first_field is not None: lowerCamelCase__ : Union[str, Any] = first_field else: for field in class_fields: lowerCamelCase__ : Optional[Any] = getattr(self , field.name ) if v is not None: lowerCamelCase__ : Union[str, Any] = v def __delitem__( self : str , *__lowerCamelCase : Any , **__lowerCamelCase : str ): '''simple docstring''' raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." ) def lowerCAmelCase ( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ): '''simple docstring''' raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." ) def lowerCAmelCase ( self : Dict , *__lowerCamelCase : int , **__lowerCamelCase : int ): '''simple docstring''' raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." ) def lowerCAmelCase ( self : str , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." ) def __getitem__( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCamelCase__ : Optional[Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Any , __lowerCamelCase : str , __lowerCamelCase : int ): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __setitem__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return tuple(self[k] for k in self.keys() ) class _lowercase ( __UpperCAmelCase , __UpperCAmelCase): """simple docstring""" @classmethod def lowerCAmelCase ( cls : int , __lowerCamelCase : int ): '''simple docstring''' raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" ) class _lowercase ( __UpperCAmelCase): """simple docstring""" A__ = '''longest''' A__ = '''max_length''' A__ = '''do_not_pad''' class _lowercase ( __UpperCAmelCase): """simple docstring""" A__ = '''pt''' A__ = '''tf''' A__ = '''np''' A__ = '''jax''' class _lowercase : """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : List[ContextManager] ): '''simple docstring''' lowerCamelCase__ : List[Any] = context_managers lowerCamelCase__ : str = ExitStack() def __enter__( self : Union[str, Any] ): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(__SCREAMING_SNAKE_CASE ) def __exit__( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : List[str] ): '''simple docstring''' self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def lowercase_ ( _A : List[Any] ): """simple docstring""" lowerCamelCase__ : int = infer_framework(_UpperCAmelCase ) if framework == "tf": lowerCamelCase__ : str = inspect.signature(model_class.call ) # TensorFlow models elif 
framework == "pt": lowerCamelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: lowerCamelCase__ : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : List[Any] = model_class.__name__ lowerCamelCase__ : List[Any] = infer_framework(_UpperCAmelCase ) if framework == "tf": lowerCamelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCamelCase__ : Any = inspect.signature(model_class.forward ) # PyTorch models else: lowerCamelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def lowercase_ ( _A : Dict , _A : Optional[int] = "" , _A : str = "." ): """simple docstring""" def _flatten_dict(_A : int , _A : Tuple="" , _A : List[Any]="." ): for k, v in d.items(): lowerCamelCase__ : Optional[int] = str(_UpperCAmelCase ) + delimiter + str(_UpperCAmelCase ) if parent_key else k if v and isinstance(_UpperCAmelCase , _UpperCAmelCase ): yield from flatten_dict(_UpperCAmelCase , _UpperCAmelCase , delimiter=_UpperCAmelCase ).items() else: yield key, v return dict(_flatten_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ) @contextmanager def lowercase_ ( _A : List[Any] , _A : List[Any] = False ): """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def lowercase_ ( _A : Tuple , _A : Optional[Any]=None ): """simple docstring""" if is_numpy_array(_UpperCAmelCase ): return np.transpose(_UpperCAmelCase , axes=_UpperCAmelCase ) elif is_torch_tensor(_UpperCAmelCase ): return array.T if axes is None else array.permute(*_UpperCAmelCase ) elif is_tf_tensor(_UpperCAmelCase ): import tensorflow as tf return tf.transpose(_UpperCAmelCase , perm=_UpperCAmelCase ) elif is_jax_tensor(_UpperCAmelCase ): return jnp.transpose(_UpperCAmelCase , axes=_UpperCAmelCase ) else: raise ValueError(F"Type not supported for transpose: {type(_UpperCAmelCase )}." ) def lowercase_ ( _A : Tuple , _A : int ): """simple docstring""" if is_numpy_array(_UpperCAmelCase ): return np.reshape(_UpperCAmelCase , _UpperCAmelCase ) elif is_torch_tensor(_UpperCAmelCase ): return array.reshape(*_UpperCAmelCase ) elif is_tf_tensor(_UpperCAmelCase ): import tensorflow as tf return tf.reshape(_UpperCAmelCase , _UpperCAmelCase ) elif is_jax_tensor(_UpperCAmelCase ): return jnp.reshape(_UpperCAmelCase , _UpperCAmelCase ) else: raise ValueError(F"Type not supported for reshape: {type(_UpperCAmelCase )}." ) def lowercase_ ( _A : int , _A : str=None ): """simple docstring""" if is_numpy_array(_UpperCAmelCase ): return np.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase ) elif is_torch_tensor(_UpperCAmelCase ): return array.squeeze() if axis is None else array.squeeze(dim=_UpperCAmelCase ) elif is_tf_tensor(_UpperCAmelCase ): import tensorflow as tf return tf.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase ) elif is_jax_tensor(_UpperCAmelCase ): return jnp.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase ) else: raise ValueError(F"Type not supported for squeeze: {type(_UpperCAmelCase )}." 
) def lowercase_ ( _A : List[str] , _A : List[str] ): """simple docstring""" if is_numpy_array(_UpperCAmelCase ): return np.expand_dims(_UpperCAmelCase , _UpperCAmelCase ) elif is_torch_tensor(_UpperCAmelCase ): return array.unsqueeze(dim=_UpperCAmelCase ) elif is_tf_tensor(_UpperCAmelCase ): import tensorflow as tf return tf.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase ) elif is_jax_tensor(_UpperCAmelCase ): return jnp.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase ) else: raise ValueError(F"Type not supported for expand_dims: {type(_UpperCAmelCase )}." ) def lowercase_ ( _A : Dict ): """simple docstring""" if is_numpy_array(_UpperCAmelCase ): return np.size(_UpperCAmelCase ) elif is_torch_tensor(_UpperCAmelCase ): return array.numel() elif is_tf_tensor(_UpperCAmelCase ): import tensorflow as tf return tf.size(_UpperCAmelCase ) elif is_jax_tensor(_UpperCAmelCase ): return array.size else: raise ValueError(F"Type not supported for expand_dims: {type(_UpperCAmelCase )}." ) def lowercase_ ( _A : Tuple , _A : List[Any] ): """simple docstring""" for key, value in auto_map.items(): if isinstance(_UpperCAmelCase , (tuple, list) ): lowerCamelCase__ : str = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: lowerCamelCase__ : Union[str, Any] = F"{repo_id}--{value}" return auto_map def lowercase_ ( _A : Tuple ): """simple docstring""" for base_class in inspect.getmro(_UpperCAmelCase ): lowerCamelCase__ : Any = base_class.__module__ lowerCamelCase__ : Optional[int] = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"Could not infer framework from class {model_class}." )
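# --- Minimal standalone sketch of the nested-dict flattening implemented in
# the utilities above (hypothetical name flatten_dict; the corpus obfuscation
# gives every helper the same name, so a distinct name is used here) ---
def flatten_dict(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if v and isinstance(v, dict):
            items.update(flatten_dict(v, key, delimiter))  # recurse into non-empty dicts
        else:
            items[key] = v
    return items


print(flatten_dict({"a": {"b": 1, "c": {"d": 2}}}))
# {'a.b': 1, 'a.c.d': 2}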
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 
'unet/diffusion_pytorch_model.fp16.safetensors', ] __a = '''fp16''' self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
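# --- Hedged usage sketch of the predicate exercised by the tests above; the
# import and the call signature (a list of file names plus an optional
# `variant`) are taken directly from those tests ---
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

filenames = [
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]
print(is_safetensors_compatible(filenames))  # True: every .bin has a .safetensors twin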
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ (__UpperCAmelCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = (EulerDiscreteScheduler,) __UpperCamelCase : List[str] = 10 def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = { """num_train_timesteps""": 11_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__SCREAMING_SNAKE_CASE ) return config def __magic_name__ (self ) -> List[Any]: """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE ) def __magic_name__ (self ) -> Tuple: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def __magic_name__ (self ) -> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : int = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_model() SCREAMING_SNAKE_CASE__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : str = sample.to(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[str] = output.prev_sample SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE__ : Dict = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" ) SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : List[str] = sample.to(__SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[Any] = model(__SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : int = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : int = output.prev_sample SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 0.0002 ) < 1E-2 assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3 def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps , device=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE__ : List[str] = sample.to(__SCREAMING_SNAKE_CASE ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : int = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : int = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Dict = output.prev_sample SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE , use_karras_sigmas=__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(self.num_inference_steps , device=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE__ : Any = sample.to(__SCREAMING_SNAKE_CASE ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[str] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : str = output.prev_sample SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE__ : List[str] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2 assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
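# --- Hedged sketch of the denoising-loop pattern the tests above exercise;
# the zero "model output" is a toy stand-in for a real UNet prediction ---
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # toy model output
    sample = scheduler.step(noise_pred, t, sample).prev_sample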
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __snake_case :Dict = '''bart''' __snake_case :Tuple = True @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __a = qar_model.eval() else: __a , __a = (None, None) if MODEL_TYPE == "bart": __a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __a = sas_model.eval() else: __a , __a = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = faiss.StandardGpuResources() __a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __a = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __a = faiss.IndexFlatIP(128 ) __a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase ) wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU else: __a , __a = (None, None) __a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): __a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __a = elia['''train_eli5'''] __a = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __a = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_UpperCAmelCase ) return (elia_train, eli5_train_q_index) __snake_case ,__snake_case ,__snake_case :List[str] = load_indexes() __snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models() __snake_case ,__snake_case :Tuple = load_train_data() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ): __a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase ) __a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase ) __a = [elia_train[int(_UpperCAmelCase )] for i in I[0]] return nn_examples def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ): if source == "none": __a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a = query_qa_dense_index( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: __a , __a = query_es_index( _UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , ) __a = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __a = '''question: {} context: 
{}'''.format(_UpperCAmelCase , _UpperCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None), } ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ): with torch.no_grad(): __a = qa_sas_generate( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('''Long Form Question Answering with ELI5''') # Start sidebar __snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>''' __snake_case :int = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __snake_case :int = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. ''' st.sidebar.markdown(description, unsafe_allow_html=True) __snake_case :Union[str, Any] = [ '''Answer the question''', '''View the retrieved document only''', '''View the most similar ELI5 question and answer''', '''Show me everything, please!''', ] __snake_case :int = st.sidebar.checkbox('''Demo options''') if demo_options: __snake_case :str = st.sidebar.selectbox( '''''', action_list, index=3, ) __snake_case :Tuple = action_list.index(action_st) __snake_case :Optional[int] = st.sidebar.selectbox( '''''', ['''Show full text of passages''', '''Show passage section titles'''], index=0, ) __snake_case :Dict = show_type == '''Show full text of passages''' else: __snake_case :Dict = 3 __snake_case :str = True __snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''') if retrieval_options: __snake_case :List[str] = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
''' st.sidebar.markdown(retriever_info) __snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none''']) __snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed''']) else: __snake_case :Optional[int] = '''wiki40b''' __snake_case :Dict = '''dense''' __snake_case :Dict = '''beam''' __snake_case :int = 2 __snake_case :str = 64 __snake_case :Tuple = 256 __snake_case :int = None __snake_case :List[Any] = None __snake_case :int = st.sidebar.checkbox('''Generation options''') if generate_options: __snake_case :Tuple = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. ''' st.sidebar.markdown(generate_info) __snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled''']) __snake_case :Dict = st.sidebar.slider( '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __snake_case :Dict = st.sidebar.slider( '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __snake_case :Tuple = st.sidebar.slider( '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) __snake_case :Any = st.sidebar.slider( '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) __snake_case :Any = None # start main text __snake_case :Dict = [ '''<MY QUESTION>''', '''How do people make chocolate?''', '''Why do we get a fever when we are sick?''', '''How can different animals perceive different colors?''', '''What is natural language processing?''', '''What\'s the best way to treat a sunburn?''', '''What exactly are vitamins ?''', '''How does nuclear energy provide electricity?''', '''What\'s the difference between viruses and bacteria?''', '''Why are flutes classified as woodwinds when most of them are made out of metal ?''', '''Why do people like drinking coffee even though it tastes so bad?''', '''What happens when wine ages? How does it make the wine taste better?''', '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''', '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''', '''How does New Zealand have so many large bird predators?''', ] __snake_case :int = st.selectbox( '''What would you like to ask? 
---- select <MY QUESTION> to enter a new query''', questions_list, index=1, ) if question_s == "<MY QUESTION>": __snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''') else: __snake_case :Optional[int] = question_s if st.button('''Show me!'''): if action in [0, 1, 3]: if index_type == "mixed": __snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10) __snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10) __snake_case :Optional[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __snake_case :Union[str, Any] = support_list[:10] __snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list]) else: __snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __snake_case ,__snake_case :Optional[int] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == '''sampled'''), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('''### The model generated answer is:''') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''') for i, res in enumerate(support_list): __snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_''')) __snake_case :int = res[1].strip() if sec_titles == "": __snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url) else: __snake_case :Optional[int] = sec_titles.split(''' & ''') __snake_case :str = ''' & '''.join( ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list] ) st.markdown( '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True ) if action in [2, 3]: __snake_case :str = find_nearest_training(question) __snake_case :str = nn_train_list[0] st.markdown( '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title''']) ) __snake_case :Optional[Any] = [ '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != ''''''])) for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score'''])) if i == 0 or sc > 2 ] st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st))) __snake_case :Tuple = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* ''' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
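# Hedged usage note: this is a Streamlit app, so it would be launched with
#   streamlit run eli5_app.py
# (file name assumed) after the model checkpoints, the wiki40b memmap files
# and the local Elasticsearch index referenced above are available.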
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
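# Worked check (grounded in the code above): for two-digit fractions the four
# non-trivial digit-cancelling cases are 16/64, 19/95, 26/65 and 49/98; their
# product reduces to 1/100, so solution() returns 100.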
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _A ( __UpperCAmelCase ): def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = eval_examples __a = post_process_function def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = gen_kwargs.copy() __a = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) __a = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) __a = gen_kwargs __a = self.eval_dataset if eval_dataset is None else eval_dataset __a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE) __a = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) else: __a = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report()) __a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE) return metrics def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = gen_kwargs.copy() __a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE) # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''') __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A =logging.get_logger(__name__) __A ={ '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _snake_case ( __UpperCAmelCase ): lowerCAmelCase :Optional[int] = '''sew-d''' def __init__( self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase=2 , _lowerCamelCase=512 , _lowerCamelCase=256 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=("p2c", "c2p") , _lowerCamelCase="layer_norm" , _lowerCamelCase="gelu_python" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-7 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=0 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , **_lowerCamelCase , ): super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = feat_extract_norm UpperCAmelCase__ : int = feat_extract_activation UpperCAmelCase__ : Tuple = list(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Dict = list(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[Any] = list(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Tuple = conv_bias UpperCAmelCase__ : int = num_conv_pos_embeddings UpperCAmelCase__ : List[str] = num_conv_pos_embedding_groups UpperCAmelCase__ : str = len(self.conv_dim) UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : str = intermediate_size UpperCAmelCase__ : Any = squeeze_factor UpperCAmelCase__ : List[Any] = max_position_embeddings UpperCAmelCase__ : Dict = position_buckets UpperCAmelCase__ : List[str] = share_att_key UpperCAmelCase__ : Union[str, Any] = relative_attention UpperCAmelCase__ : Union[str, Any] = norm_rel_ebd UpperCAmelCase__ : Dict = list(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : List[str] = hidden_dropout UpperCAmelCase__ : Optional[int] = attention_dropout UpperCAmelCase__ : Tuple = activation_dropout UpperCAmelCase__ : List[str] = feat_proj_dropout UpperCAmelCase__ : List[Any] = final_dropout UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : List[Any] = feature_layer_norm_eps UpperCAmelCase__ : str = initializer_range UpperCAmelCase__ : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == 
`len(config.conv_kernel)`,""" f'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase__ : List[Any] = apply_spec_augment UpperCAmelCase__ : Dict = mask_time_prob UpperCAmelCase__ : List[str] = mask_time_length UpperCAmelCase__ : int = mask_time_min_masks UpperCAmelCase__ : int = mask_feature_prob UpperCAmelCase__ : Union[str, Any] = mask_feature_length UpperCAmelCase__ : List[str] = mask_feature_min_masks # ctc loss UpperCAmelCase__ : Any = ctc_loss_reduction UpperCAmelCase__ : Tuple = ctc_zero_infinity # sequence classification UpperCAmelCase__ : Optional[Any] = use_weighted_layer_sum UpperCAmelCase__ : str = classifier_proj_size @property def snake_case__ ( self): return functools.reduce(operator.mul , self.conv_stride , 1)
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
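# Usage sketch for the reconstructed evaluate_postfix above:
# "(2 + 3) * 4" in postfix notation is ["2", "3", "+", "4", "*"].
assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20
assert evaluate_postfix(["-7", "2", "/"]) == -3  # division truncates toward zero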
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1

        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
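# A further usage sketch (same SearchProblem helper imported above): anneal toward
# the minimum of f(x, y) = (x - 3)^2 + (y + 1)^2, whose true minimum is 0 at (3, -1).
# The objective name test_f3 is introduced here for illustration only.
if __name__ == "__main__":

    def test_f3(x, y):
        return (x - 3) ** 2 + (y + 1) ** 2

    prob = SearchProblem(x=0, y=0, step_size=1, function_to_optimize=test_f3)
    approx_min = simulated_annealing(prob, find_max=False)
    print(f"Approximate minimum for f(x, y) = (x-3)^2 + (y+1)^2: {approx_min.score()}")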
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __snake_case :Optional[int] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __snake_case :List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __snake_case :List[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = random.randint(0 , len(_UpperCAmelCase ) - 1 ) __a = parent_a[:random_slice] + parent_a[random_slice:] __a = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __a = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): __a = [] # Generate more children proportionally to the fitness score. __a = int(parent_a[1] * 100 ) + 1 __a = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): __a = population_score[random.randint(0 , _UpperCAmelCase )][0] __a , __a = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: __a = f'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. __a = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __a = f'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. __a = [] for _ in range(_UpperCAmelCase ): population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. __a , __a = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population] # Check if there is a matching evolution. 
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'\nGeneration: {generation}' f'\nTotal Population:{total_population}' f'\nBest score: {population_score[0][1]}' f'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __a = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCAmelCase ) # Normalize population score to be between 0 and 1. __a = [ (item, score / len(_UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCAmelCase ): population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": __snake_case :Optional[int] = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __snake_case :List[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list) print( f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}' )
"""simple docstring""" from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowerCamelCase ( __UpperCAmelCase ): def __init__(self , **__a ) -> Any: super().__init__(**__SCREAMING_SNAKE_CASE ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__(self , __a , **__a ) -> Optional[Any]: return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def snake_case_ (self , **__a ) -> Tuple: UpperCamelCase = {} if "candidate_labels" in kwargs: UpperCamelCase = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: UpperCamelCase = kwargs["hypothesis_template"] return preprocess_params, {}, {} def snake_case_ (self , __a , __a=None , __a="This is a sound of {}." ) -> Optional[Any]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if audio.startswith("http://" ) or audio.startswith("https://" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png UpperCamelCase = requests.get(__SCREAMING_SNAKE_CASE ).content else: with open(__SCREAMING_SNAKE_CASE , "rb" ) as f: UpperCamelCase = f.read() if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase = ffmpeg_read(__SCREAMING_SNAKE_CASE , self.feature_extractor.sampling_rate ) if not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): raise ValueError("We expect a numpy ndarray as input" ) if len(audio.shape ) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" ) UpperCamelCase = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" ) UpperCamelCase = candidate_labels UpperCamelCase = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] UpperCamelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase = [text_inputs] return inputs def snake_case_ (self , __a ) -> Optional[int]: UpperCamelCase = model_inputs.pop("candidate_labels" ) UpperCamelCase = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase = text_inputs[0] else: # Batching case. UpperCamelCase = text_inputs[0][0] UpperCamelCase = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def snake_case_ (self , __a ) -> Tuple: UpperCamelCase = model_outputs.pop("candidate_labels" ) UpperCamelCase = model_outputs["logits"][0] if self.framework == "pt": UpperCamelCase = logits.softmax(dim=0 ) UpperCamelCase = probs.tolist() else: raise ValueError("`tf` framework not supported." ) UpperCamelCase = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __a : -x[0] ) ] return result
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
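# Typical invocation of the conversion script above (a sketch; the script file
# name and all paths are hypothetical placeholders):
#
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin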
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """
    Implements the ReLU (rectified linear unit) activation function,
    i.e. max(0, x) applied elementwise to the input.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
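# Because np.maximum broadcasts, relu also works elementwise on multi-dimensional
# arrays (a small illustrative check, not part of the original file):
assert (relu(np.array([[-2.5, 3.0], [0.0, -0.1]])) == np.array([[0.0, 3.0], [0.0, 0.0]])).all()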
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
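# Usage sketch for the reader above (the file path is a hypothetical placeholder).
# This class is the machinery behind load_dataset("text", ...): the resulting
# dataset has a single "text" column with one example per line of the file.
#
#     reader = TextDatasetReader("my_corpus.txt", split="train")
#     dataset = reader.read()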
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() _A : int = logging.get_logger(__name__) _A : str = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def UpperCamelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Dict ) -> Union[str, Any]: '''simple docstring''' __lowerCAmelCase = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 10_24, """hidden_size""": 7_68, """max_length""": 5_12, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 10_24, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1E-5, """token_type_vocab_size""": 2, } __lowerCAmelCase = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __lowerCAmelCase = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=_UpperCAmelCase , output_all_encodings=_UpperCAmelCase , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , _UpperCAmelCase ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __lowerCAmelCase = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab __lowerCAmelCase = os.path.join(get_home_dir() , """models""" ) __lowerCAmelCase = _load_vocab(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls=_UpperCAmelCase ) __lowerCAmelCase = nlp.model.BERTModel( _UpperCAmelCase , len(_UpperCAmelCase ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=_UpperCAmelCase , use_token_type_embed=_UpperCAmelCase , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=_UpperCAmelCase , use_decoder=_UpperCAmelCase , ) original_bort.load_parameters(_UpperCAmelCase , cast_dtype=_UpperCAmelCase , ignore_extra=_UpperCAmelCase ) __lowerCAmelCase = original_bort._collect_params_with_prefix() # Build our config 🤗 __lowerCAmelCase = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.0_2, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(_UpperCAmelCase ), } __lowerCAmelCase = BertConfig.from_dict(_UpperCAmelCase ) __lowerCAmelCase = BertForMaskedLM(_UpperCAmelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case_ : str ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case_ : List[Any] , snake_case_ : Dict ): __lowerCAmelCase = hf_param.shape __lowerCAmelCase = to_torch(params[gluon_param] ) __lowerCAmelCase = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param __lowerCAmelCase = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) __lowerCAmelCase = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) __lowerCAmelCase = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) __lowerCAmelCase = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __lowerCAmelCase = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __lowerCAmelCase = hf_bort_model.bert.encoder.layer[i] # self attention __lowerCAmelCase = layer.attention.self __lowerCAmelCase = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) __lowerCAmelCase = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) __lowerCAmelCase = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) __lowerCAmelCase = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) __lowerCAmelCase = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) __lowerCAmelCase = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output __lowerCAmelCase = layer.attention.output __lowerCAmelCase = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) __lowerCAmelCase = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) __lowerCAmelCase = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) __lowerCAmelCase = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate __lowerCAmelCase = layer.intermediate __lowerCAmelCase = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) __lowerCAmelCase = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output __lowerCAmelCase = layer.output __lowerCAmelCase = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) __lowerCAmelCase = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) __lowerCAmelCase = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) __lowerCAmelCase = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __lowerCAmelCase = RobertaTokenizer.from_pretrained("""roberta-base""" ) __lowerCAmelCase = tokenizer.encode_plus(_UpperCAmelCase )["""input_ids"""] # Get gluon output __lowerCAmelCase = mx.nd.array([input_ids] ) __lowerCAmelCase = original_bort(inputs=_UpperCAmelCase , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = 
BertModel.from_pretrained(_UpperCAmelCase ) hf_bort_model.eval() __lowerCAmelCase = tokenizer.encode_plus(_UpperCAmelCase , return_tensors="""pt""" ) __lowerCAmelCase = hf_bort_model(**_UpperCAmelCase )[0] __lowerCAmelCase = output_gluon[0].asnumpy() __lowerCAmelCase = output_hf[0].detach().numpy() __lowerCAmelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item() __lowerCAmelCase = np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , _UpperCAmelCase ) if __name__ == "__main__": _A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _A : Union[str, Any] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case :List[str] = '''\ Text data. Second line of data.''' __snake_case :Optional[Any] = '''file''' @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') __a = bytes(_UpperCAmelCase , '''utf-8''' ) with zstd.open(_UpperCAmelCase , '''wb''' ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def __snake_case ( _UpperCAmelCase ): with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} __a = input_paths[compression_format] __a = tmp_path / '''cache''' __a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: __a = f.read() with open(_UpperCAmelCase ) as f: __a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''custom_cache''' __a = '''custom_extracted_dir''' __a = tmp_path / '''custom_extracted_path''' if default_extracted: __a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) ) __a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __a = xz_file __a = ( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase ) ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path __a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path __a = '''./__missing_file__.txt''' with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = get_from_cache(f'tmp://{tmpfs_file}' ) with open(_UpperCAmelCase ) as f: __a = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( ): with pytest.raises(_UpperCAmelCase ): 
cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head('''s3://huggingface.co''' )
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): @register_to_config def __init__( self : List[str] , _A : int , _A : int , _A : int , _A : float , _A : int , _A : int , _A : int , _A : int , _A : str , _A : bool = False , ) -> Dict: """simple docstring""" super().__init__() snake_case_ : Dict = nn.Embedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) snake_case_ : List[Any] = nn.Embedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = False snake_case_ : List[Any] = nn.Dropout(p=__SCREAMING_SNAKE_CASE ) snake_case_ : Dict = TaConfig( vocab_size=__SCREAMING_SNAKE_CASE , d_model=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , d_kv=__SCREAMING_SNAKE_CASE , d_ff=__SCREAMING_SNAKE_CASE , dropout_rate=__SCREAMING_SNAKE_CASE , feed_forward_proj=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , ) snake_case_ : List[str] = nn.ModuleList() for lyr_num in range(__SCREAMING_SNAKE_CASE ): snake_case_ : int = TaBlock(__SCREAMING_SNAKE_CASE ) self.encoders.append(__SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = TaLayerNorm(__SCREAMING_SNAKE_CASE ) snake_case_ : List[Any] = nn.Dropout(p=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : List[str] , _A : List[Any] , _A : Optional[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Union[str, Any] = self.token_embedder(__SCREAMING_SNAKE_CASE ) snake_case_ : Union[str, Any] = encoder_input_tokens.shape[1] snake_case_ : Optional[int] = torch.arange(__SCREAMING_SNAKE_CASE , device=encoder_input_tokens.device ) x += self.position_encoding(__SCREAMING_SNAKE_CASE ) snake_case_ : str = self.dropout_pre(__SCREAMING_SNAKE_CASE ) # inverted the attention mask snake_case_ : int = encoder_input_tokens.size() snake_case_ : Any = self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for lyr in self.encoders: snake_case_ : Any = lyr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0] snake_case_ : Dict = self.layer_norm(__SCREAMING_SNAKE_CASE ) return self.dropout_post(__SCREAMING_SNAKE_CASE ), encoder_inputs_mask
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = (DDPMParallelScheduler,) def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' __a = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE) return config def _lowerCamelCase ( self : List[str]): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = self.dummy_sample_deter + 0.1 __a = self.dummy_sample_deter - 0.1 __a = samplea.shape[0] __a = torch.stack([samplea, samplea, samplea] , dim=0) __a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE) __a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) __a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 11_53.18_33) < 1E-2 assert abs(result_mean.item() - 0.50_05) < 1E-3 def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = 
self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_58.96_06) < 1E-2 assert abs(result_mean.item() - 0.33_72) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(prediction_type='''v_prediction''') __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_02.02_96) < 1E-2 assert abs(result_mean.item() - 0.26_31) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) __a = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE): if i == len(__SCREAMING_SNAKE_CASE) - 1: __a = -1 else: __a = timesteps[i + 1] __a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE) __a = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] __a = len(__SCREAMING_SNAKE_CASE) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Optional[int]="pt" ): '''simple docstring''' _UpperCAmelCase = {'''add_prefix_space''': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(''' ''' ) else {} _UpperCAmelCase = padding_side return tokenizer( [line] , max_length=_UpperCAmelCase , padding='''max_length''' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any=None , ): '''simple docstring''' _UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class _a ( __UpperCAmelCase): """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Any="train" , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any="" , )->Optional[Any]: super().__init__() _UpperCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.source''' ) _UpperCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.target''' ) _UpperCAmelCase = self.get_char_lens(self.src_file ) _UpperCAmelCase = max_source_length _UpperCAmelCase = max_target_length assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}' _UpperCAmelCase = tokenizer _UpperCAmelCase = prefix if n_obs is not None: _UpperCAmelCase = self.src_lens[:n_obs] _UpperCAmelCase = src_lang _UpperCAmelCase = tgt_lang def __len__( self : Any )->Union[str, Any]: return len(self.src_lens ) def __getitem__( self : Tuple , __UpperCamelCase : Dict )->Tuple: _UpperCAmelCase = index + 1 # linecache starts at 1 _UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , __SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) _UpperCAmelCase = linecache.getline(str(self.tgt_file ) , __SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) assert source_line, F'empty source line for index {index}' assert tgt_line, F'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right _UpperCAmelCase = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer ) _UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer _UpperCAmelCase = encode_line(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.max_source_length , '''right''' ) _UpperCAmelCase = encode_line(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 
self.max_target_length , '''right''' ) _UpperCAmelCase = source_inputs['''input_ids'''].squeeze() _UpperCAmelCase = target_inputs['''input_ids'''].squeeze() _UpperCAmelCase = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowercase__ ( __UpperCamelCase : Tuple )->int: return [len(__SCREAMING_SNAKE_CASE ) for x in Path(__SCREAMING_SNAKE_CASE ).open().readlines()] def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[Any] )->List[str]: _UpperCAmelCase = torch.stack([x['''input_ids'''] for x in batch] ) _UpperCAmelCase = torch.stack([x['''attention_mask'''] for x in batch] ) _UpperCAmelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] ) _UpperCAmelCase = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) _UpperCAmelCase = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) _UpperCAmelCase = trim_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = trim_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch __A : Optional[int] = getLogger(__name__) def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return list(itertools.chain.from_iterable(_UpperCAmelCase ) ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = get_git_info() save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''git_log.json''' ) ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any=4 , **_SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' with open(_UpperCAmelCase , '''w''' ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase ) def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' with open(_UpperCAmelCase ) as f: return json.load(_UpperCAmelCase ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase ) _UpperCAmelCase = { '''repo_id''': str(_UpperCAmelCase ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' return list(map(_UpperCAmelCase , _UpperCAmelCase ) ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' with open(_UpperCAmelCase , '''wb''' ) as f: return pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' def remove_articles(_SCREAMING_SNAKE_CASE : Optional[Any] ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , _UpperCAmelCase ) def white_space_fix(_SCREAMING_SNAKE_CASE : str ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE : Union[str, Any] ): _UpperCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE : Union[str, Any] ): return text.lower() return 
white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
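# Hedged aside: a self-contained sketch of the SQuAD-style token-level F1 the
# helpers above implement (names here are illustrative, not from the original
# file), useful for checking the metric logic in isolation.
from collections import Counter


def token_f1(prediction: str, ground_truth: str) -> float:
    pred_tokens = prediction.lower().split()
    gt_tokens = ground_truth.lower().split()
    common = Counter(pred_tokens) & Counter(gt_tokens)  # multiset intersection
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gt_tokens)
    return 2 * precision * recall / (precision + recall)


# e.g. token_f1("the cat sat", "a cat sat") == 2/3: two shared tokens out of three on each side.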
260
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __snake_case :List[Any] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _A ( __UpperCAmelCase ): def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) requires_backends(self , '''vision''') requires_backends(self , '''torch''') if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.') self.check_model_type(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = {} __a = {} __a = {} # preprocess args if "points_per_batch" in kwargs: __a = kwargs['''points_per_batch'''] if "points_per_crop" in kwargs: __a = kwargs['''points_per_crop'''] if "crops_n_layers" in kwargs: __a = kwargs['''crops_n_layers'''] if "crop_overlap_ratio" in kwargs: __a = kwargs['''crop_overlap_ratio'''] if "crop_n_points_downscale_factor" in kwargs: __a = kwargs['''crop_n_points_downscale_factor'''] # postprocess args if "pred_iou_thresh" in kwargs: __a = kwargs['''pred_iou_thresh'''] if "stability_score_offset" in kwargs: __a = kwargs['''stability_score_offset'''] if "mask_threshold" in kwargs: __a = kwargs['''mask_threshold'''] if "stability_score_thresh" in kwargs: __a = kwargs['''stability_score_thresh'''] if "crops_nms_thresh" in kwargs: __a = kwargs['''crops_nms_thresh'''] if "output_rle_mask" in kwargs: __a = kwargs['''output_rle_mask'''] if "output_bboxes_mask" in kwargs: __a = kwargs['''output_bboxes_mask'''] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ): '''simple docstring''' __a = load_image(__SCREAMING_SNAKE_CASE) __a = self.image_processor.size['''longest_edge'''] __a , __a , __a , __a = self.image_processor.generate_crop_boxes( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') with self.device_placement(): if self.framework == "pt": __a = self.get_inference_context() with inference_context(): __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''')) __a = image_embeddings __a = grid_points.shape[1] __a = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( '''Cannot have points_per_batch<=0. 
Must be >=1 to return batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
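# Hedged usage sketch for the mask-generation pipeline above. The checkpoint id
# "facebook/sam-vit-base" is an assumption for illustration; any SAM checkpoint
# with a matching image processor should work.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,    # chunk the point grid in preprocess to bound memory
    pred_iou_thresh=0.88,   # drop low-confidence masks in _forward
)
print(len(outputs["masks"]), outputs["scores"][:3])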
49
0
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
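# Hedged usage sketch: these filesystems back protocol-chained URLs once they
# are registered with fsspec (the `datasets` library registers them on import);
# "data/file.txt.gz" is an illustrative local path.
import fsspec

# Explicit codec with plain fsspec -- no registration needed:
with fsspec.open("data/file.txt.gz", mode="rt", compression="gzip") as f:
    print(f.readline())

# Chained form handled by GzipFileSystem above (requires registration):
with fsspec.open("gzip://file.txt::data/file.txt.gz", mode="rb") as f:
    print(f.read(80))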
184
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __snake_case :str = logging.get_logger(__name__) __snake_case :int = {'''vocab_file''': '''vocab.txt'''} __snake_case :List[Any] = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } __snake_case :List[str] = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } __snake_case :Optional[int] = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : int = ConvBertTokenizer def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , __SCREAMING_SNAKE_CASE : int="[SEP]" , __SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE) != do_lower_case or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE) != tokenize_chinese_chars ): __a = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''')) __a = do_lower_case __a = strip_accents __a = tokenize_chinese_chars __a = normalizer_class(**__SCREAMING_SNAKE_CASE) __a = do_lower_case def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=None): '''simple docstring''' __a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCamelCase ( 
self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
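# Hedged usage sketch for the fast tokenizer above; "YituTech/conv-bert-base"
# is one of the checkpoints already listed in the vocab map of this file.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
enc = tok("Hello world", "second segment")
print(enc["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second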
49
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ : Tuple = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Any = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : List[str] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : List[Any] = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys UpperCAmelCase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
25
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __snake_case :Any = logging.get_logger(__name__) __snake_case :Optional[Any] = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } __snake_case :List[Any] = { '''b0''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 224, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 240, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 1408, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 260, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 1536, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 300, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 1792, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 380, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2048, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 456, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 2304, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 528, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 2560, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 600, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def __snake_case ( _UpperCAmelCase ): __a = EfficientNetConfig() __a = CONFIG_MAP[model_name]['''hidden_dim'''] __a = CONFIG_MAP[model_name]['''width_coef'''] __a = CONFIG_MAP[model_name]['''depth_coef'''] __a = CONFIG_MAP[model_name]['''image_size'''] __a = CONFIG_MAP[model_name]['''dropout_rate'''] __a = CONFIG_MAP[model_name]['''dw_padding'''] __a = '''huggingface/label-files''' __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} return config def __snake_case ( ): __a = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return im def __snake_case ( _UpperCAmelCase ): __a = CONFIG_MAP[model_name]['''image_size'''] __a = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_UpperCAmelCase , ) return preprocessor def __snake_case ( _UpperCAmelCase ): __a = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] __a = sorted(set(_UpperCAmelCase ) ) __a = len(_UpperCAmelCase ) __a = {b: 
str(_UpperCAmelCase ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )} __a = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: __a = block_name_mapping[b] rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') ) rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') ) rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') ) rename_keys.append( (f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') ) rename_keys.append( (f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') ) rename_keys.append( (f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') ) rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') ) rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') ) rename_keys.append( (f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') ) rename_keys.append( (f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') ) rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') ) rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') ) rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') ) rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') ) rename_keys.append( (f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') ) rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') ) rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') ) rename_keys.append( (f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') ) rename_keys.append( (f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) __a = {} for item in rename_keys: if item[0] in original_param_names: __a = '''efficientnet.''' + item[1] __a = '''classifier.weight''' __a = '''classifier.bias''' return key_mapping def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for key, value in tf_params.items(): if "normalization" in key: continue __a = key_mapping[key] if "_conv" in key and "kernel" in key: __a = torch.from_numpy(_UpperCAmelCase 
).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __a = torch.from_numpy(_UpperCAmelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __a = torch.from_numpy(np.transpose(_UpperCAmelCase ) ) else: __a = torch.from_numpy(_UpperCAmelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCAmelCase ) @torch.no_grad() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = model_classes[model_name]( include_top=_UpperCAmelCase , weights='''imagenet''' , input_tensor=_UpperCAmelCase , input_shape=_UpperCAmelCase , pooling=_UpperCAmelCase , classes=1000 , classifier_activation='''softmax''' , ) __a = original_model.trainable_variables __a = original_model.non_trainable_variables __a = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __a = param.numpy() __a = list(tf_params.keys() ) # Load HuggingFace model __a = get_efficientnet_config(_UpperCAmelCase ) __a = EfficientNetForImageClassification(_UpperCAmelCase ).eval() __a = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) __a = rename_keys(_UpperCAmelCase ) replace_params(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Initialize preprocessor and preprocess input image __a = convert_image_processor(_UpperCAmelCase ) __a = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): __a = hf_model(**_UpperCAmelCase ) __a = outputs.logits.detach().numpy() # Original model inference __a = False __a = CONFIG_MAP[model_name]['''image_size'''] __a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __a = image.img_to_array(_UpperCAmelCase ) __a = np.expand_dims(_UpperCAmelCase , axis=0 ) __a = original_model.predict(_UpperCAmelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCAmelCase ): os.mkdir(_UpperCAmelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCAmelCase ) preprocessor.save_pretrained(_UpperCAmelCase ) if push_to_hub: # Push model and image processor to hub print(f'Pushing converted {model_name} to the hub...' ) __a = f'efficientnet-{model_name}' preprocessor.push_to_hub(_UpperCAmelCase ) hf_model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": __snake_case :int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') __snake_case :Optional[int] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
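# Hedged usage sketch: loading a converted checkpoint for inference. The hub id
# "google/efficientnet-b0" is an assumption; substitute the local
# --pytorch_dump_folder_path from the script above instead.
import torch
from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor

processor = EfficientNetImageProcessor.from_pretrained("google/efficientnet-b0")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b0").eval()
inputs = processor(images=prepare_img(), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])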
49
0
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# lowercase_ = [ # (stable-diffusion, HF Diffusers) ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''), ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''), ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''), ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''), ('''input_blocks.0.0.weight''', '''conv_in.weight'''), ('''input_blocks.0.0.bias''', '''conv_in.bias'''), ('''out.0.weight''', '''conv_norm_out.weight'''), ('''out.0.bias''', '''conv_norm_out.bias'''), ('''out.2.weight''', '''conv_out.weight'''), ('''out.2.bias''', '''conv_out.bias'''), ] lowercase_ = [ # (stable-diffusion, HF Diffusers) ('''in_layers.0''', '''norm1'''), ('''in_layers.2''', '''conv1'''), ('''out_layers.0''', '''norm2'''), ('''out_layers.3''', '''conv2'''), ('''emb_layers.1''', '''time_emb_proj'''), ('''skip_connection''', '''conv_shortcut'''), ] lowercase_ = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks lowercase_ = f"""down_blocks.{i}.resnets.{j}.""" lowercase_ = f"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 lowercase_ = f"""down_blocks.{i}.attentions.{j}.""" lowercase_ = f"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks lowercase_ = f"""up_blocks.{i}.resnets.{j}.""" lowercase_ = f"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 lowercase_ = f"""up_blocks.{i}.attentions.{j}.""" lowercase_ = f"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 lowercase_ = f"""down_blocks.{i}.downsamplers.0.conv.""" lowercase_ = f"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 lowercase_ = f"""up_blocks.{i}.upsamplers.0.""" lowercase_ = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) lowercase_ = '''mid_block.attentions.0.''' lowercase_ = '''middle_block.1.''' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): lowercase_ = f"""mid_block.resnets.{j}.""" lowercase_ = f"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCamelCase ( __lowerCamelCase : Any ) ->Optional[Any]: # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
_SCREAMING_SNAKE_CASE = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: _SCREAMING_SNAKE_CASE = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: _SCREAMING_SNAKE_CASE = v.replace(_UpperCAmelCase , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: _SCREAMING_SNAKE_CASE = v.replace(_UpperCAmelCase , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = v _SCREAMING_SNAKE_CASE = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# lowercase_ = [ # (stable-diffusion, HF Diffusers) ('''nin_shortcut''', '''conv_shortcut'''), ('''norm_out''', '''conv_norm_out'''), ('''mid.attn_1.''', '''mid_block.attentions.0.'''), ] for i in range(4): # down_blocks have two resnets for j in range(2): lowercase_ = f"""encoder.down_blocks.{i}.resnets.{j}.""" lowercase_ = f"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: lowercase_ = f"""down_blocks.{i}.downsamplers.0.""" lowercase_ = f"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) lowercase_ = f"""up_blocks.{i}.upsamplers.0.""" lowercase_ = f"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): lowercase_ = f"""decoder.up_blocks.{i}.resnets.{j}.""" lowercase_ = f"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): lowercase_ = f"""mid_block.resnets.{i}.""" lowercase_ = f"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) lowercase_ = [ # (stable-diffusion, HF Diffusers) ('''norm.''', '''group_norm.'''), ('''q.''', '''query.'''), ('''k.''', '''key.'''), ('''v.''', '''value.'''), ('''proj_out.''', '''proj_attn.'''), ] def lowerCamelCase ( __lowerCamelCase : Dict ) ->List[Any]: # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def lowerCamelCase ( __lowerCamelCase : str ) ->Tuple: _SCREAMING_SNAKE_CASE = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: _SCREAMING_SNAKE_CASE = v.replace(_UpperCAmelCase , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: _SCREAMING_SNAKE_CASE = v.replace(_UpperCAmelCase , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = v _SCREAMING_SNAKE_CASE = {v: vae_state_dict[k] for k, v in mapping.items()} _SCREAMING_SNAKE_CASE = ["""q""", """k""", """v""", """proj_out"""] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'mid.attn_1.{weight_name}.weight' in k: print(F'Reshaping {k} for SD format' ) _SCREAMING_SNAKE_CASE = reshape_weight_for_sd(_UpperCAmelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# lowercase_ = [ # (stable-diffusion, HF Diffusers) ('''resblocks.''', '''text_model.encoder.layers.'''), ('''ln_1''', '''layer_norm1'''), ('''ln_2''', '''layer_norm2'''), ('''.c_fc.''', '''.fc1.'''), ('''.c_proj.''', '''.fc2.'''), ('''.attn''', '''.self_attn'''), ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''), 
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''), ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''), ] lowercase_ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} lowercase_ = re.compile("""|""".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp lowercase_ = {'''q''': 0, '''k''': 1, '''v''': 2} def lowerCamelCase ( __lowerCamelCase : Union[str, Any] ) ->Any: _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): _SCREAMING_SNAKE_CASE = k[: -len(""".q_proj.weight""" )] _SCREAMING_SNAKE_CASE = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: _SCREAMING_SNAKE_CASE = [None, None, None] _SCREAMING_SNAKE_CASE = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): _SCREAMING_SNAKE_CASE = k[: -len(""".q_proj.bias""" )] _SCREAMING_SNAKE_CASE = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: _SCREAMING_SNAKE_CASE = [None, None, None] _SCREAMING_SNAKE_CASE = v continue _SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda __lowerCamelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) _SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda __lowerCamelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) _SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda __lowerCamelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) _SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase ) return new_state_dict def lowerCamelCase ( __lowerCamelCase : Union[str, Any] ) ->Union[str, Any]: return text_enc_dict if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt.""" ) lowercase_ = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors lowercase_ = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""") lowercase_ = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""") lowercase_ = osp.join(args.model_path, """text_encoder""", """model.safetensors""") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): lowercase_ = load_file(unet_path, device="""cpu""") else: lowercase_ = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""") lowercase_ = torch.load(unet_path, map_location="""cpu""") if osp.exists(vae_path): lowercase_ = load_file(vae_path, device="""cpu""") else: lowercase_ = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""") lowercase_ = torch.load(vae_path, map_location="""cpu""") if osp.exists(text_enc_path): lowercase_ = load_file(text_enc_path, device="""cpu""") else: lowercase_ = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""") lowercase_ = torch.load(text_enc_path, map_location="""cpu""") # Convert the UNet model lowercase_ = convert_unet_state_dict(unet_state_dict) lowercase_ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model lowercase_ = convert_vae_state_dict(vae_state_dict) lowercase_ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper lowercase_ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm lowercase_ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()} lowercase_ = convert_text_enc_state_dict_vaa(text_enc_dict) lowercase_ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()} else: lowercase_ = convert_text_enc_state_dict(text_enc_dict) lowercase_ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint lowercase_ = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: lowercase_ = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: lowercase_ = {'''state_dict''': state_dict} torch.save(state_dict, args.checkpoint_path)
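# Hedged usage sketch: a typical invocation of this conversion script (the
# script filename and paths are illustrative), plus a sanity check that the
# produced checkpoint contains the three expected key prefixes.
#
#   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors
#
from safetensors.torch import load_file

state_dict = load_file("./model.safetensors", device="cpu")
assert any(k.startswith("model.diffusion_model.") for k in state_dict)  # UNet keys
assert any(k.startswith("first_stage_model.") for k in state_dict)      # VAE keys
assert any(k.startswith("cond_stage_model.") for k in state_dict)       # text encoder keys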
58
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
49
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool over valid tokens before projecting into the CLIP space
        embs_pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs_pooled), embs
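# Hedged usage sketch for the multilingual text tower above. The checkpoint id
# "M-CLIP/XLM-Roberta-Large-Vit-B-32" and the class name MultilingualCLIP are
# assumptions for illustration.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
batch = tokenizer(["a photo of a cat", "une photo d'un chat"], padding=True, return_tensors="pt")
projected, raw = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # (batch, numDims) -- text embeddings in the CLIP image space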
163
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but are
    arranged differently (ignoring the case).
    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment the corresponding counter
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
49
0
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None )-> Optional[Any]: """simple docstring""" assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match""" _UpperCAmelCase = nn.Parameter(_UpperCAmelCase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match""" _UpperCAmelCase = nn.Parameter(_UpperCAmelCase ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Dict: """simple docstring""" _UpperCAmelCase = np.asarray(weights[0] ) _UpperCAmelCase = np.asarray(weights[1] ) _UpperCAmelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(_UpperCAmelCase ).view(-1 , _UpperCAmelCase ).contiguous().transpose(0 , 1 ) , ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> str: """simple docstring""" _UpperCAmelCase = np.asarray(weights[0] ) _UpperCAmelCase = np.asarray(weights[1] ) _UpperCAmelCase = np.asarray(weights[2] ) _UpperCAmelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(_UpperCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , _UpperCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(_UpperCAmelCase ).view(-1 , _UpperCAmelCase ).contiguous().transpose(0 , 1 ) , ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]: """simple docstring""" _UpperCAmelCase = weights[0][0][0] _UpperCAmelCase = np.asarray(layer_norm_a[0] ) _UpperCAmelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # lsh weights + output _UpperCAmelCase = weights[0][1] if len(_UpperCAmelCase ) < 4: set_layer_weights_in_torch_lsh(_UpperCAmelCase , torch_block.attention , _UpperCAmelCase ) else: set_layer_weights_in_torch_local(_UpperCAmelCase , torch_block.attention , _UpperCAmelCase ) # intermediate weighs _UpperCAmelCase = weights[2][0][1][2] # Chunked Feed Forward if len(_UpperCAmelCase ) == 4: _UpperCAmelCase = intermediate_weights[2] # layernorm 2 _UpperCAmelCase = np.asarray(intermediate_weights[0][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # intermediate dense _UpperCAmelCase = np.asarray(intermediate_weights[1][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) # intermediate out _UpperCAmelCase = np.asarray(intermediate_weights[4][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[4][1] ) set_param( 
torch_block.feed_forward.output.dense , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[int]: """simple docstring""" _UpperCAmelCase = torch_model.reformer # word embeds _UpperCAmelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(_UpperCAmelCase ) , ) if isinstance(weights[3] , _UpperCAmelCase ): _UpperCAmelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _UpperCAmelCase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"""{position_embeddings[emb_idx]} emb does not match""" _UpperCAmelCase = nn.Parameter(torch.tensor(_UpperCAmelCase ) ) _UpperCAmelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( _UpperCAmelCase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _UpperCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # output layer norm _UpperCAmelCase = np.asarray(weights[7][0] ) _UpperCAmelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) , ) # output embeddings _UpperCAmelCase = np.asarray(weights[9][0] ) _UpperCAmelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(_UpperCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(_UpperCAmelCase ) , ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> str: """simple docstring""" _UpperCAmelCase = ReformerConfig.from_json_file(_UpperCAmelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) _UpperCAmelCase = ReformerModelWithLMHead(_UpperCAmelCase ) with open(_UpperCAmelCase , 'rb' ) as f: _UpperCAmelCase = pickle.load(_UpperCAmelCase )['weights'] set_model_weights_in_torch(_UpperCAmelCase , _UpperCAmelCase , config.hidden_size ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , _UpperCAmelCase ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _a = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
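# Hedged usage sketch: loading the checkpoint written by the conversion script
# above (paths are illustrative).
import torch
from transformers import ReformerConfig, ReformerModelWithLMHead

config = ReformerConfig.from_json_file("./config.json")
model = ReformerModelWithLMHead(config)
model.load_state_dict(torch.load("./pytorch_model.bin", map_location="cpu"))
model.eval()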
39
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
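# Hedged usage sketch (assuming the restored name MaskedBertConfig): the config
# carries standard BERT sizes plus the three movement-pruning knobs.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.hidden_size, config.num_hidden_layers, config.pruning_method)  # 768 12 topK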
49
0
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase : def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1_28 , __a=32 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[str]: UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def snake_case_ (self ) -> int: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ (self ) -> List[Any]: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def snake_case_ (self ) -> List[str]: ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True 
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]: UpperCamelCase = NezhaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCamelCase = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCamelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Any: UpperCamelCase = True UpperCamelCase = NezhaModel(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) UpperCamelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ) UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]: UpperCamelCase = NezhaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple: UpperCamelCase = NezhaForNextSentencePrediction(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[str]: UpperCamelCase = NezhaForPreTraining(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , next_sentence_label=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> Dict: UpperCamelCase = 
NezhaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]: UpperCamelCase = self.num_labels UpperCamelCase = NezhaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]: UpperCamelCase = self.num_labels UpperCamelCase = NezhaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_ (self , __a , __a , __a , __a , __a , __a , __a ) -> int: UpperCamelCase = self.num_choices UpperCamelCase = NezhaForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case_ (self ) -> List[Any]: UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): UpperCAmelCase_ = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) UpperCAmelCase_ = ( { '''feature-extraction''': NezhaModel, '''fill-mask''': NezhaForMaskedLM, '''question-answering''': NezhaForQuestionAnswering, '''text-classification''': NezhaForSequenceClassification, '''token-classification''': NezhaForTokenClassification, '''zero-shot''': NezhaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase_ = True def snake_case_ (self , __a , __a , __a=False ) -> List[Any]: UpperCamelCase = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) 
if return_labels: if model_class in get_values(__SCREAMING_SNAKE_CASE ): UpperCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) return inputs_dict def snake_case_ (self ) -> List[Any]: UpperCamelCase = NezhaModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def snake_case_ (self ) -> List[Any]: self.config_tester.run_common_tests() def snake_case_ (self ) -> List[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> List[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> Any: ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase = None self.model_tester.create_and_check_model_as_decoder( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> int: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> List[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> List[str]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def snake_case_ (self ) -> List[str]: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = NezhaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @slow @require_torch_gpu def snake_case_ (self ) -> List[Any]: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return UpperCamelCase = True UpperCamelCase = model_class(config=__SCREAMING_SNAKE_CASE ) UpperCamelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.jit.trace( __SCREAMING_SNAKE_CASE , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , "bert.pt" ) ) UpperCamelCase = torch.jit.load(os.path.join(__SCREAMING_SNAKE_CASE , "bert.pt" ) , map_location=__SCREAMING_SNAKE_CASE ) loaded(inputs_dict["input_ids"].to(__SCREAMING_SNAKE_CASE ) , inputs_dict["attention_mask"].to(__SCREAMING_SNAKE_CASE ) ) @require_torch class _lowerCamelCase ( unittest.TestCase ): @slow def snake_case_ (self ) -> Optional[int]: UpperCamelCase = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] UpperCamelCase = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def snake_case_ (self ) -> str: UpperCamelCase = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] UpperCamelCase = torch.Size((1, 6, 2_11_28) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
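The tester above routes Nezha through every task head; a standalone forward pass follows the same shape check. This is a minimal sketch, assuming a `transformers` version that ships the Nezha classes imported above.

import torch
from transformers import NezhaConfig, NezhaModel

# Tiny randomly initialised model mirroring the tester's defaults.
config = NezhaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = NezhaModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))
with torch.no_grad():
    outputs = model(input_ids)
assert outputs.last_hidden_state.shape == (2, 7, config.hidden_size)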
153
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
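A short usage sketch (field values are placeholders): the config is a plain dataclass, and `copy()` deep-copies every field.

cfg = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3, proxies={"https": "http://proxy:8080"})
cfg_copy = cfg.copy()
assert cfg_copy.proxies == cfg.proxies and cfg_copy.proxies is not cfg.proxies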
49
0
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A : int = logging.get_logger(__name__) class __A( __UpperCAmelCase ): snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case=125 , _snake_case=None , **_snake_case , ) -> Optional[int]: '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: __a = [F"""<extra_id_{i}>""" for i in range(__SCREAMING_SNAKE_CASE )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __a = len(set(filter(lambda _snake_case : bool('''extra_id''' in str(__SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the''' ''' extra_ids tokens''' ) __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else pad_token __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else eos_token __a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else unk_token super().__init__( eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , extra_ids=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = extra_ids __a = 2**8 # utf is 8 bits # define special tokens dict __a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __a = len(self.special_tokens_encoder ) __a = len(__SCREAMING_SNAKE_CASE ) for i, token in enumerate(__SCREAMING_SNAKE_CASE ): __a = self.vocab_size + i - n __a = {v: k for k, v in self.special_tokens_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> Dict: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Any: '''simple docstring''' if len(__SCREAMING_SNAKE_CASE ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Union[str, Any]: '''simple docstring''' __a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> int: '''simple docstring''' __a = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return token_ids_a else: __a = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE ) return token_ids_a + token_ids_a def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict: '''simple docstring''' __a = [chr(__SCREAMING_SNAKE_CASE ) for i in text.encode('''utf-8''' )] return tokens def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]: '''simple docstring''' if token in self.special_tokens_encoder: __a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __a = self.added_tokens_encoder[token] elif len(__SCREAMING_SNAKE_CASE ) != 1: __a = self.unk_token_id else: __a = ord(__SCREAMING_SNAKE_CASE ) + self._num_special_tokens return token_id def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' if index in self.special_tokens_decoder: __a = self.special_tokens_decoder[index] else: __a = chr(index - self._num_special_tokens ) return token def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict: '''simple docstring''' __a = B'''''' for token in tokens: if token in self.special_tokens_decoder: __a = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.added_tokens_decoder: __a = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.special_tokens_encoder: __a = token.encode('''utf-8''' ) elif token in self.added_tokens_encoder: __a = token.encode('''utf-8''' ) else: __a = bytes([ord(__SCREAMING_SNAKE_CASE )] ) bstring += tok_string __a = bstring.decode('''utf-8''' , errors='''ignore''' ) return string def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Optional[int]: '''simple docstring''' return ()
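The byte-level mapping above is easy to verify by hand: each UTF-8 byte is shifted up by the three special tokens (pad=0, eos=1, unk=2). A small sketch, assuming the published google/byt5-small checkpoint (any ByT5 checkpoint behaves the same):

from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
# 'h' = byte 104 -> id 107, 'i' = 105 -> 108, '!' = 33 -> 36, then eos (id 1) is appended.
print(tokenizer("hi!").input_ids)  # [107, 108, 36, 1]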
6
from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :Union[str, Any] = logging.get_logger(__name__) __snake_case :Any = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[int] = '''switch_transformers''' UpperCamelCase__ : Optional[Any] = ['''past_key_values'''] UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' __a = vocab_size __a = d_model __a = d_kv __a = d_ff __a = num_sparse_encoder_layers __a = num_layers __a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __a = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __a = self.num_layers // self.num_sparse_encoder_layers else: __a = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: __a = self.num_decoder_layers // self.num_sparse_decoder_layers else: __a = self.num_decoder_layers # HACK: this will create 0 sparse layers __a = num_heads __a = num_experts __a = expert_capacity __a = router_bias __a = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}') __a = router_dtype __a = router_ignore_padding_tokens __a = relative_attention_num_buckets __a = relative_attention_max_distance __a = dropout_rate __a = layer_norm_epsilon __a = initializer_factor __a = feed_forward_proj __a = use_cache __a = add_router_probs __a = router_z_loss_coef __a = router_aux_loss_coef __a = self.feed_forward_proj.split('''-''') __a = act_info[-1] __a = act_info[0] == '''gated''' if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''') # for backwards compatibility if feed_forward_proj == "gated-gelu": __a = '''gelu_new''' super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
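The sparse-layer spacing computed in `__init__` above reduces to a single integer division; a worked example with the defaults (12 layers, 3 sparse encoder layers):

num_layers, num_sparse_encoder_layers = 12, 3
if num_sparse_encoder_layers > 0:
    encoder_sparse_step = num_layers // num_sparse_encoder_layers  # a sparse MoE block every 4th layer
else:
    encoder_sparse_step = num_layers  # the "HACK" above: effectively no sparse layers
print(encoder_sparse_step)  # 4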
49
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : Optional[Any] = """ZinengTang/tvlt-base""" a__ : Dict = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Optional[Any]: """simple docstring""" return TvltImageProcessor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Optional[int]: """simple docstring""" return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : Any = self.get_image_processor() a__ : Optional[int] = self.get_feature_extractor() a__ : str = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) a__ : Optional[Any] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : List[str] = self.get_image_processor() a__ : str = self.get_feature_extractor() a__ : List[str] = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) a__ : Optional[int] = np.ones([1_2_0_0_0] ) a__ : List[str] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) a__ : Any = processor(audio=__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : str = self.get_image_processor() a__ : int = self.get_feature_extractor() a__ : str = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) a__ : List[str] = np.ones([3, 2_2_4, 2_2_4] ) a__ : Optional[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) a__ : List[str] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Optional[Any] = self.get_image_processor() a__ : Any = self.get_feature_extractor() a__ : int = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) a__ : Optional[int] = np.ones([1_2_0_0_0] ) a__ : Optional[Any] = np.ones([3, 2_2_4, 2_2_4] ) a__ : Union[str, Any] = processor(audio=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] ) # test if it raises when no input is passed with pytest.raises(__SCREAMING_SNAKE_CASE ): processor() def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" 
a__ : Optional[int] = self.get_image_processor() a__ : Any = self.get_feature_extractor() a__ : Any = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
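Outside the test harness, the same processor composition works directly; a sketch that mirrors the assertions above (it downloads the ZinengTang/tvlt-base checkpoint used by the tests):

import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

processor = TvltProcessor(
    image_processor=TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base"),
    feature_extractor=TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base"),
)
inputs = processor(audio=np.ones(12_000), images=np.ones((3, 224, 224)))
print(sorted(inputs.keys()))  # ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']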
170
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex __snake_case :List[Any] = logging.getLogger(__name__) class _A : def __init__( self : List[str]): '''simple docstring''' __a = False def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' if not self.initialized: __a = RagRetriever( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , ) __a = True def _lowerCamelCase ( self : List[str]): '''simple docstring''' self.retriever.index.init_index() def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]): '''simple docstring''' __a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return doc_ids, retrieved_doc_embeds class _A ( __UpperCAmelCase ): def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None): '''simple docstring''' if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0: raise ValueError( '''When using Ray for distributed fine-tuning, ''' '''you\'ll need to provide the paths instead, ''' '''as the dataset and the index are loaded ''' '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''') super().__init__( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , ) __a = retrieval_workers if len(self.retrieval_workers) > 0: ray.get( [ worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for worker in self.retrieval_workers ]) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' logger.info('''initializing retrieval''') if len(self.retrieval_workers) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' if len(self.retrieval_workers) > 0: # Select a random retrieval actor. 
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)] __a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) else: __a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE) @classmethod def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]): '''simple docstring''' return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) @classmethod def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' __a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE) __a = rag_tokenizer.question_encoder __a = rag_tokenizer.generator if indexed_dataset is not None: __a = '''custom''' __a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE) else: __a = cls._build_index(__SCREAMING_SNAKE_CASE) return cls( __SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
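Wiring this up requires a list of Ray actor handles wrapping the first class above (named RayRetriever upstream; both class names are obfuscated to `_A` in this dump). A minimal sketch with a hypothetical worker count of two:

import ray

ray.init()

RemoteRetriever = ray.remote(RayRetriever)  # turn the worker class into a Ray actor
retrieval_workers = [RemoteRetriever.remote() for _ in range(2)]

# The distributed retriever's constructor then calls create_rag_retriever.remote(...)
# on every handle, and init_retrieval() fans out init_retrieval.remote() to all of them.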
49
0
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) _A : List[str] = '''\ Text data. Second line of data.''' _A : Optional[Any] = '''file''' @pytest.fixture(scope="""session""" ) def UpperCamelCase_ ( snake_case_ : Any ) -> str: '''simple docstring''' __lowerCAmelCase = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") __lowerCAmelCase = bytes(_UpperCAmelCase , """utf-8""" ) with zstd.open(_UpperCAmelCase , """wb""" ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def UpperCamelCase_ ( snake_case_ : Union[str, Any] ) -> List[Any]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , """w""" ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def UpperCamelCase_ ( snake_case_ : str , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Dict ) -> int: '''simple docstring''' __lowerCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} __lowerCAmelCase = input_paths[compression_format] __lowerCAmelCase = tmp_path / """cache""" __lowerCAmelCase = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase ) __lowerCAmelCase = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: __lowerCAmelCase = f.read() with open(_UpperCAmelCase ) as f: __lowerCAmelCase = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def UpperCamelCase_ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Tuple ) -> List[Any]: '''simple docstring''' __lowerCAmelCase = """custom_cache""" __lowerCAmelCase = """custom_extracted_dir""" __lowerCAmelCase = tmp_path / """custom_extracted_path""" if default_extracted: __lowerCAmelCase = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _UpperCAmelCase ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_UpperCAmelCase ) ) __lowerCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __lowerCAmelCase = xz_file __lowerCAmelCase = ( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase ) ) __lowerCAmelCase = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def UpperCamelCase_ ( snake_case_ : int ) -> Any: '''simple docstring''' __lowerCAmelCase = str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path __lowerCAmelCase = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def UpperCamelCase_ ( snake_case_ : Optional[Any] ) -> str: '''simple docstring''' __lowerCAmelCase = str(tmp_path.resolve() / 
"""__missing_file__.txt""" ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path __lowerCAmelCase = """./__missing_file__.txt""" with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) def UpperCamelCase_ ( snake_case_ : Optional[int] ) -> str: '''simple docstring''' __lowerCAmelCase = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(_UpperCAmelCase ) as f: __lowerCAmelCase = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _UpperCAmelCase ) def UpperCamelCase_ ( ) -> Union[str, Any]: '''simple docstring''' with pytest.raises(_UpperCAmelCase ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _UpperCAmelCase ) def UpperCamelCase_ ( snake_case_ : Optional[int] ) -> Tuple: '''simple docstring''' __lowerCAmelCase = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(_UpperCAmelCase ): http_get("""https://huggingface.co""" , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _UpperCAmelCase ) def UpperCamelCase_ ( snake_case_ : int ) -> List[Any]: '''simple docstring''' __lowerCAmelCase = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(_UpperCAmelCase ): ftp_get("""ftp://huggingface.co""" , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , _UpperCAmelCase ) def UpperCamelCase_ ( snake_case_ : List[str] ) -> Dict: '''simple docstring''' __lowerCAmelCase = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(_UpperCAmelCase ): fsspec_get("""s3://huggingface.co""" , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head("""s3://huggingface.co""" )
229
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
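The programmatic equivalent of the CLI entry point above, with placeholder paths:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/bigbird/model.ckpt",
    big_bird_config_file="/path/to/big_bird_config.json",
    pytorch_dump_path="/path/to/pytorch_output",
    is_trivia_qa=False,
)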
49
0
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_number = num
        while iterations < 50:
            a_number = sum_reverse(a_number)
            iterations += 1
            if is_palindrome(a_number):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
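A quick sanity check of the reverse-and-add step: 47 + 74 = 121, a palindrome after one iteration, whereas 196 (the classic Lychrel candidate) never settles within the 50-iteration budget and is therefore counted.

assert sum_reverse(47) == 121
assert is_palindrome(121)

n = 196
for _ in range(50):
    n = sum_reverse(n)
    assert not is_palindrome(n)  # 196 never reaches a palindrome within the budget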
327
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:  # left padding
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here, as the labels are not all the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
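`padding_tensor` right- or left-pads ragged Python lists to a fixed length; a tiny worked example:

padded = padding_tensor([[1, 2], [3]], padding_value=-1, padding_side="right", sequence_length=4)
print(padded)  # [[1, 2, -1, -1], [3, -1, -1, -1]]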
49
0
"""simple docstring""" from __future__ import annotations def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative in a semiconductor''' ) elif hole_conc < 0: raise ValueError('''Hole concentration cannot be negative in a semiconductor''' ) elif intrinsic_conc < 0: raise ValueError( '''Intrinsic concentration cannot be negative in a semiconductor''' ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
260
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prims_algorithm():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
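As a cross-check, the expected tree above spans all 9 nodes with 8 edges of total weight 37, the minimum for this classic example graph:

expected_costs = [1, 2, 2, 4, 4, 7, 8, 9]
assert sum(expected_costs) == 37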
49
0
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : str=32 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : str=16 , __lowerCamelCase : Tuple=[1, 2, 1] , __lowerCamelCase : Tuple=[2, 2, 4] , __lowerCamelCase : str=2 , __lowerCamelCase : int=2.0 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : int=0.1 , __lowerCamelCase : str="gelu" , __lowerCamelCase : int=False , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Optional[Any]=1E-5 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Optional[int]=8 , ): '''simple docstring''' lowerCamelCase__ : str = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : Union[str, Any] = image_size lowerCamelCase__ : str = patch_size lowerCamelCase__ : List[str] = num_channels lowerCamelCase__ : Union[str, Any] = embed_dim lowerCamelCase__ : Any = depths lowerCamelCase__ : int = num_heads lowerCamelCase__ : Tuple = window_size lowerCamelCase__ : List[Any] = mlp_ratio lowerCamelCase__ : Optional[int] = qkv_bias lowerCamelCase__ : Dict = hidden_dropout_prob lowerCamelCase__ : Dict = attention_probs_dropout_prob lowerCamelCase__ : Any = drop_path_rate lowerCamelCase__ : Dict = hidden_act lowerCamelCase__ : Optional[int] = use_absolute_embeddings lowerCamelCase__ : Union[str, Any] = patch_norm lowerCamelCase__ : List[str] = layer_norm_eps lowerCamelCase__ : Union[str, Any] = initializer_range lowerCamelCase__ : Optional[int] = is_training lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : Dict = type_sequence_label_size lowerCamelCase__ : Tuple = encoder_stride def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : Optional[Any] = None if self.use_labels: lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : Any ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob 
, attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = SwinvaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase__ : int = model(__SCREAMING_SNAKE_CASE ) lowerCamelCase__ : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase__ : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase ( self : str , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any ): '''simple docstring''' lowerCamelCase__ : List[str] = SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase__ : Optional[int] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ : str = 1 lowerCamelCase__ : Tuple = SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any ): '''simple docstring''' lowerCamelCase__ : Tuple = self.type_sequence_label_size lowerCamelCase__ : str = SwinvaForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase__ : List[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs lowerCamelCase__ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase): """simple docstring""" A__ = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A__ = ( {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification} if is_torch_available() else {} ) A__ = False A__ = False A__ = False A__ = False def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = SwinvaModelTester(self ) lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." ) def lowerCAmelCase ( self : str ): '''simple docstring''' pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def lowerCAmelCase ( self : int ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[str] = model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase__ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE ) lowerCamelCase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Optional[int] = [*signature.parameters.keys()] lowerCamelCase__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Any = True for model_class in self.all_model_classes: lowerCamelCase__ : int = True lowerCamelCase__ : Tuple = False lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Any = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) lowerCamelCase__ : Dict = outputs.attentions lowerCamelCase__ : List[str] = len(self.model_tester.depths ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase__ : List[str] = True lowerCamelCase__ : Optional[int] = config.window_size**2 lowerCamelCase__ : Any = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) lowerCamelCase__ : Dict = outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCamelCase__ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine lowerCamelCase__ : Dict = True lowerCamelCase__ : List[str] = True lowerCamelCase__ : Dict = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with 
torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): lowerCamelCase__ : Union[str, Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCamelCase__ : List[Any] = 2 self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) ) lowerCamelCase__ : int = outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : int = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) lowerCamelCase__ : List[str] = outputs.hidden_states lowerCamelCase__ : str = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) # Swinv2 has a different seq_length lowerCamelCase__ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase__ : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = reshaped_hidden_states[0].shape lowerCamelCase__ : int = ( reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCamelCase__ : int = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Tuple = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : List[Any] = 3 lowerCamelCase__ : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) 
lowerCamelCase__ : List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase__ : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCamelCase__ : int = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : Union[str, Any] = True self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : str = SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Union[str, Any] = _config_zero_init(__SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: lowerCamelCase__ : Tuple = model_class(config=__SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @cached_property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( __SCREAMING_SNAKE_CASE ) lowerCamelCase__ : Optional[int] = self.default_image_processor lowerCamelCase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCamelCase__ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**__SCREAMING_SNAKE_CASE ) # verify the logits lowerCamelCase__ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) lowerCamelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
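# A minimal inference sketch built from the Swinv2 integration test above, for running
# the same checkpoint outside the test harness. The checkpoint id and the (1, 1000)
# logits shape come from the test; the COCO image URL is the standard sample image used
# in the transformers docs and is only an illustrative choice.
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, Swinv2ForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

# Any RGB image works; this URL is the same picture as the local test fixture above.
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the integration test

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])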
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
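# The tests above pin down the contract of `is_safetensors_compatible`: a repo is
# compatible when every PyTorch weight file has a safetensors counterpart, where a
# diffusers submodel pairs `diffusion_pytorch_model(.<variant>).bin` with
# `diffusion_pytorch_model(.<variant>).safetensors`, and a transformers submodel pairs
# `pytorch_model(.<variant>).bin` with `model(.<variant>).safetensors`. The helper below
# is a simplified re-implementation of that rule for illustration only -- an
# assumption-based sketch that satisfies these tests, not the actual diffusers code.
def naive_is_safetensors_compatible(filenames, variant=None):
    suffix = f".{variant}" if variant is not None else ""
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        folder, _, fname = name.rpartition("/")
        if fname == f"diffusion_pytorch_model{suffix}.bin":
            expected = f"{folder}/diffusion_pytorch_model{suffix}.safetensors"
        elif fname == f"pytorch_model{suffix}.bin":
            expected = f"{folder}/model{suffix}.safetensors"
        else:
            # .bin files that don't match the requested variant are ignored,
            # which is why the "variant_partial" tests above still pass.
            continue
        if expected not in filenames:
            return False
    return True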
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowercase_ ( ): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join SCREAMING_SNAKE_CASE__ : Any = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching ,"""os.path.join""" ,_UpperCAmelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os ,_PatchedModuleObj ) assert isinstance(_test_patching.os.path ,_PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path ,_PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowercase_ ( ): assert _test_patching.open is open SCREAMING_SNAKE_CASE__ : int = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching ,"""open""" ,_UpperCAmelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowercase_ ( ): # pandas.read_csv is not present in _test_patching SCREAMING_SNAKE_CASE__ : Dict = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching ,"""pandas.read_csv""" ,_UpperCAmelCase ): pass def lowercase_ ( ): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point SCREAMING_SNAKE_CASE__ : Tuple = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching ,"""len""" ,_UpperCAmelCase ) is None with patch_submodule(_test_patching ,"""len""" ,_UpperCAmelCase ): assert _test_patching.len is mock assert _test_patching.len 
is len def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Any = """__test_patch_submodule_start_and_stop_mock__""" SCREAMING_SNAKE_CASE__ : Dict = patch_submodule(_test_patching ,"""open""" ,_UpperCAmelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowercase_ ( ): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join SCREAMING_SNAKE_CASE__ : List[Any] = """__test_patch_submodule_successive_join__""" SCREAMING_SNAKE_CASE__ : Any = """__test_patch_submodule_successive_dirname__""" SCREAMING_SNAKE_CASE__ : Tuple = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching ,"""os.path.join""" ,_UpperCAmelCase ): with patch_submodule(_test_patching ,"""os.rename""" ,_UpperCAmelCase ): with patch_submodule(_test_patching ,"""os.path.dirname""" ,_UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching ,"""os.rename""" ,_UpperCAmelCase ): with patch_submodule(_test_patching ,"""os.path.join""" ,_UpperCAmelCase ): with patch_submodule(_test_patching ,"""os.path.dirname""" ,_UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : List[str] = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching ,"""__module_that_doesn_exist__.__attribute_that_doesn_exist__""" ,_UpperCAmelCase ): pass with patch_submodule(_test_patching ,"""os.__attribute_that_doesn_exist__""" ,_UpperCAmelCase ): pass
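# Conceptually, `patch_submodule` works by replacing intermediate modules in the patched
# module's globals with attribute-forwarding wrappers, so every access path
# (`os.path.join`, `path.join`, `join`, and any renamed aliases) resolves to the mock.
# Below is a stripped-down sketch of that wrapper idea, for illustration only -- the
# real `_PatchedModuleObj` in `datasets.utils.patching` handles more cases.
import os


class ForwardingModule:
    """Wraps a module, forwarding attribute lookups unless they are overridden."""

    def __init__(self, module, overrides):
        self._module = module
        self._overrides = overrides

    def __getattr__(self, name):
        # Called only for attributes not found on the instance itself.
        if name in self._overrides:
            return self._overrides[name]
        return getattr(self._module, name)


# Make `fake_os.path.join` return a sentinel while every other attribute still
# resolves to the real `os` module.
sentinel = "__mock_join__"
fake_path = ForwardingModule(os.path, {"join": sentinel})
fake_os = ForwardingModule(os, {"path": fake_path})
assert fake_os.path.join is sentinel
assert fake_os.rename is os.rename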
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __snake_case :Dict = '''bart''' __snake_case :Tuple = True @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __a = qar_model.eval() else: __a , __a = (None, None) if MODEL_TYPE == "bart": __a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __a = sas_model.eval() else: __a , __a = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = faiss.StandardGpuResources() __a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __a = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __a = faiss.IndexFlatIP(128 ) __a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase ) wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU else: __a , __a = (None, None) __a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): __a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __a = elia['''train_eli5'''] __a = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __a = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_UpperCAmelCase ) return (elia_train, eli5_train_q_index) __snake_case ,__snake_case ,__snake_case :List[str] = load_indexes() __snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models() __snake_case ,__snake_case :Tuple = load_train_data() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ): __a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase ) __a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase ) __a = [elia_train[int(_UpperCAmelCase )] for i in I[0]] return nn_examples def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ): if source == "none": __a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a = query_qa_dense_index( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: __a , __a = query_es_index( _UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , ) __a = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __a = '''question: {} context: 
{}'''.format(_UpperCAmelCase , _UpperCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None), } ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ): with torch.no_grad(): __a = qa_sas_generate( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('''Long Form Question Answering with ELI5''') # Start sidebar __snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>''' __snake_case :int = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __snake_case :int = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. ''' st.sidebar.markdown(description, unsafe_allow_html=True) __snake_case :Union[str, Any] = [ '''Answer the question''', '''View the retrieved document only''', '''View the most similar ELI5 question and answer''', '''Show me everything, please!''', ] __snake_case :int = st.sidebar.checkbox('''Demo options''') if demo_options: __snake_case :str = st.sidebar.selectbox( '''''', action_list, index=3, ) __snake_case :Tuple = action_list.index(action_st) __snake_case :Optional[int] = st.sidebar.selectbox( '''''', ['''Show full text of passages''', '''Show passage section titles'''], index=0, ) __snake_case :Dict = show_type == '''Show full text of passages''' else: __snake_case :Dict = 3 __snake_case :str = True __snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''') if retrieval_options: __snake_case :List[str] = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
''' st.sidebar.markdown(retriever_info) __snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none''']) __snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed''']) else: __snake_case :Optional[int] = '''wiki40b''' __snake_case :Dict = '''dense''' __snake_case :Dict = '''beam''' __snake_case :int = 2 __snake_case :str = 64 __snake_case :Tuple = 256 __snake_case :int = None __snake_case :List[Any] = None __snake_case :int = st.sidebar.checkbox('''Generation options''') if generate_options: __snake_case :Tuple = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. ''' st.sidebar.markdown(generate_info) __snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled''']) __snake_case :Dict = st.sidebar.slider( '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __snake_case :Dict = st.sidebar.slider( '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __snake_case :Tuple = st.sidebar.slider( '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) __snake_case :Any = st.sidebar.slider( '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) __snake_case :Any = None # start main text __snake_case :Dict = [ '''<MY QUESTION>''', '''How do people make chocolate?''', '''Why do we get a fever when we are sick?''', '''How can different animals perceive different colors?''', '''What is natural language processing?''', '''What\'s the best way to treat a sunburn?''', '''What exactly are vitamins ?''', '''How does nuclear energy provide electricity?''', '''What\'s the difference between viruses and bacteria?''', '''Why are flutes classified as woodwinds when most of them are made out of metal ?''', '''Why do people like drinking coffee even though it tastes so bad?''', '''What happens when wine ages? How does it make the wine taste better?''', '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''', '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''', '''How does New Zealand have so many large bird predators?''', ] __snake_case :int = st.selectbox( '''What would you like to ask? 
---- select <MY QUESTION> to enter a new query''', questions_list, index=1, ) if question_s == "<MY QUESTION>": __snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''') else: __snake_case :Optional[int] = question_s if st.button('''Show me!'''): if action in [0, 1, 3]: if index_type == "mixed": __snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10) __snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10) __snake_case :Optional[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __snake_case :Union[str, Any] = support_list[:10] __snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list]) else: __snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __snake_case ,__snake_case :Optional[int] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == '''sampled'''), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('''### The model generated answer is:''') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''') for i, res in enumerate(support_list): __snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_''')) __snake_case :int = res[1].strip() if sec_titles == "": __snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url) else: __snake_case :Optional[int] = sec_titles.split(''' & ''') __snake_case :str = ''' & '''.join( ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list] ) st.markdown( '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True ) if action in [2, 3]: __snake_case :str = find_nearest_training(question) __snake_case :str = nn_train_list[0] st.markdown( '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title''']) ) __snake_case :Optional[Any] = [ '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != ''''''])) for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score'''])) if i == 0 or sc > 2 ] st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st))) __snake_case :Tuple = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* ''' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
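# A sketch of how QuestionAnsweringSeq2SeqTrainer is typically wired up. Everything
# below -- the post-processing body, the metric choice, and the model/dataset objects --
# is a hypothetical placeholder meant only to show the call pattern: the
# post_process_function turns raw generation output into predictions paired with
# references, and compute_metrics consumes that.
#
#   def post_processing_function(examples, features, outputs, stage="eval"):
#       # decode generated ids and pair them with reference answers (task-specific)
#       ...
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,                      # assumed: a seq2seq model
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,      # raw examples, before feature extraction
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,  # e.g. a squad/rouge metric function
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)  # gen_kwargs flow through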
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A =logging.get_logger(__name__) class _snake_case ( __UpperCAmelCase ): lowerCAmelCase :Optional[Any] = ['''pixel_values'''] def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(**__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : str = size if size is not None else {"""shortest_edge""": 384} UpperCAmelCase__ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[Any] = do_resize UpperCAmelCase__ : Union[str, Any] = size # Default value set here for backwards compatibility where the value in config is None UpperCAmelCase__ : Optional[int] = crop_pct if crop_pct is not None else 224 / 256 UpperCAmelCase__ : str = resample UpperCAmelCase__ : Tuple = do_rescale UpperCAmelCase__ : Optional[int] = rescale_factor UpperCAmelCase__ : List[str] = do_normalize UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ): UpperCAmelCase__ : Any = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''') UpperCAmelCase__ : str = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCAmelCase__ : Any = int(shortest_edge / crop_pct) UpperCAmelCase__ : List[Any] = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Optional[Any] = resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) else: # warping (no cropping) when evaluated at 384 or larger return resize( __SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ): return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ): return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ): UpperCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : str = crop_pct if crop_pct is not None else self.crop_pct UpperCAmelCase__ : Dict = resample if resample is not None else self.resample UpperCAmelCase__ : Any = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Any = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : Any = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Dict = size if size is not None else self.size UpperCAmelCase__ : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[str] = make_list_of_images(__SCREAMING_SNAKE_CASE) if not valid_images(__SCREAMING_SNAKE_CASE): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""") if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""") # All transformations expect numpy arrays. UpperCAmelCase__ : List[str] = [to_numpy_array(__SCREAMING_SNAKE_CASE) for image in images] if do_resize: UpperCAmelCase__ : Union[str, Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , crop_pct=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE) for image in images] if do_rescale: UpperCAmelCase__ : str = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE) for image in images] if do_normalize: UpperCAmelCase__ : List[Any] = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE) for image in images] UpperCAmelCase__ : Tuple = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for image in images] UpperCAmelCase__ : Tuple = {"""pixel_values""": images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE)
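# A brief usage sketch for the image processor above. In this dump the class carries the
# obfuscated name `_snake_case`; its defaults (shortest_edge 384, crop_pct 224/256)
# match ConvNeXT preprocessing, so the sketch uses transformers' ConvNextImageProcessor
# -- treat that mapping as an assumption. The input image is a random placeholder.
import numpy as np

from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)

# Placeholder input: one RGB image as an (H, W, C) uint8 array.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

batch = processor(images=image, return_tensors="np")
# shortest_edge (224) < 384, so the image is first resized so its short side is
# 224 / (224/256) = 256, then center-cropped to 224x224.
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)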
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Python floor-divides toward negative infinity; adjust so that
                # division truncates toward zero, as in C-style integer math.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
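# Worked example of the evaluator above on the expression (2 + 1) * 3, whose postfix
# form is "2 1 + 3 *": the stack evolves [2] -> [2, 1] -> [3] -> [3, 3] -> [9].
print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # 9
# Division truncates toward zero rather than flooring, so -5 / 2 yields -2, not -3:
print(evaluate_postfix(["-5", "2", "/"]))  # -2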
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
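# Example invocation of the script above. The flag names come straight from the
# argparse definitions; the script filename and the GCS bucket path are hypothetical
# placeholders.
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 \
#       --tokenizer_name_or_path sayakpaul/unigram-tokenizer-wikitext \
#       --shard_size 1000 \
#       --split train \
#       --max_length 512 \
#       --output_dir gs://my-tf-tpu-bucket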
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Count how many characters sit in the right position relative to the target.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ = list[tuple[int, int]] lowerCAmelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCAmelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class _lowerCamelCase : def __init__(self , __a , __a , __a , __a , __a , __a , ) -> Union[str, Any]: UpperCamelCase = pos_x UpperCamelCase = pos_y UpperCamelCase = (pos_y, pos_x) UpperCamelCase = goal_x UpperCamelCase = goal_y UpperCamelCase = g_cost UpperCamelCase = parent UpperCamelCase = self.calculate_heuristic() def snake_case_ (self ) -> Tuple: UpperCamelCase = abs(self.pos_x - self.goal_x ) UpperCamelCase = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__(self , __a ) -> List[Any]: return self.f_cost < other.f_cost class _lowerCamelCase : def __init__(self , __a , __a ) -> Dict: UpperCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __SCREAMING_SNAKE_CASE ) UpperCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __SCREAMING_SNAKE_CASE ) UpperCamelCase = [self.start] UpperCamelCase = [] UpperCamelCase = False def snake_case_ (self ) -> Optional[int]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCamelCase = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: UpperCamelCase = True return self.retrace_path(__SCREAMING_SNAKE_CASE ) self.closed_nodes.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase = self.get_successors(__SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__SCREAMING_SNAKE_CASE ) else: # retrieve the best current path UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(__SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(__SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def snake_case_ (self , __a ) -> List[str]: UpperCamelCase = [] for action in delta: UpperCamelCase = parent.pos_x + action[1] UpperCamelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __SCREAMING_SNAKE_CASE , ) ) return successors def snake_case_ (self , __a ) -> Any: UpperCamelCase = node UpperCamelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCamelCase = current_node.parent path.reverse() return path if __name__ == "__main__": lowerCAmelCase__ = (0, 0) lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('''------''') lowerCAmelCase__ = GreedyBestFirst(init, goal) lowerCAmelCase__ = greedy_bf.search() if path: for pos_x, pos_y in path: lowerCAmelCase__ = 2 for elem in grid: print(elem)
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
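# Example invocation of the conversion script above. The script filename and the
# checkpoint/config paths are hypothetical placeholders; the flag names come from the
# argparse definitions.
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf_ckpt/model.ckpt \
#       --config_file ./lxmert_tf_ckpt/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin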
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A : int = logging.get_logger(__name__) A : Union[str, Any] = '''▁''' A : List[str] = {'''vocab_file''': '''prophetnet.tokenizer'''} A : List[str] = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } A : Dict = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } A : str = { '''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2, } def __lowerCAmelCase ( a__ ) -> Tuple: __a = collections.OrderedDict() with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as reader: __a = reader.readlines() for index, token in enumerate(_UpperCAmelCase ): __a = token.rstrip('''\n''' ) __a = index return vocab class __A( __UpperCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case , _snake_case="[SEP]" , _snake_case="[SEP]" , _snake_case="[SEP]" , _snake_case="[UNK]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ) -> Tuple: '''simple docstring''' __a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) ) __a = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __a = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4} for i in range(10 ): __a = F"""[unused{i}]""" __a = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __a = 12 __a = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(__SCREAMING_SNAKE_CASE ) def __getstate__( self ) -> Union[str, Any]: '''simple docstring''' __a = self.__dict__.copy() __a = None return state def __setstate__( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a = {} __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[Any]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Optional[Any]: '''simple docstring''' __a = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Any: '''simple docstring''' return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __a = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Any: '''simple docstring''' if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __a = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if 
os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: __a = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[Any]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] __a = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
6
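A short usage sketch for the tokenizer above, assuming it is transformers' XLMProphetNetTokenizer (the vocab URLs point at the microsoft/xprophetnet-large-wiki100-cased checkpoint).

# Hedged example: round-trip a sentence through the pretrained tokenizer.
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # subword pieces plus the trailing [SEP]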
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
170
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
49
0
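A hedged usage sketch: the reader above backs the datasets text loader, which is normally reached through load_dataset rather than instantiated directly; notes.txt is a placeholder file name.

# Load a plain-text file as a dataset with one example per line.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "notes.txt"})["train"]  # notes.txt is hypothetical
print(ds[0]["text"])  # first line of the file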
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
229
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case :List[str] = '''\ Text data. Second line of data.''' __snake_case :Optional[Any] = '''file''' @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') __a = bytes(_UpperCAmelCase , '''utf-8''' ) with zstd.open(_UpperCAmelCase , '''wb''' ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def __snake_case ( _UpperCAmelCase ): with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} __a = input_paths[compression_format] __a = tmp_path / '''cache''' __a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: __a = f.read() with open(_UpperCAmelCase ) as f: __a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''custom_cache''' __a = '''custom_extracted_dir''' __a = tmp_path / '''custom_extracted_path''' if default_extracted: __a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) ) __a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __a = xz_file __a = ( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase ) ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path __a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path __a = '''./__missing_file__.txt''' with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = get_from_cache(f'tmp://{tmpfs_file}' ) with open(_UpperCAmelCase ) as f: __a = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( ): with pytest.raises(_UpperCAmelCase ): 
cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head('''s3://huggingface.co''' )
49
0
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase , unittest.TestCase ): __magic_name__: Tuple = CodeGenTokenizer __magic_name__: Tuple = CodeGenTokenizerFast __magic_name__: int = True __magic_name__: List[str] = {'''add_prefix_space''': True} __magic_name__: str = False def UpperCAmelCase_ ( self : Optional[int] ) -> str: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] snake_case_ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) snake_case_ : Optional[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] snake_case_ : str = {'unk_token': '<unk>'} snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase_ ( self : Any , **_A : Optional[int] ) -> Optional[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : Tuple , **_A : Optional[int] ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : List[Any] , _A : str ) -> Optional[int]: """simple docstring""" snake_case_ : List[str] = 'lower newer' snake_case_ : Any = 'lower newer' return input_text, output_text def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ : int = 'lower newer' snake_case_ : Union[str, Any] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] snake_case_ : Optional[Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) snake_case_ : Union[str, Any] = tokens + [tokenizer.unk_token] snake_case_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: """simple docstring""" if not self.test_rust_tokenizer: return snake_case_ : Dict = self.get_tokenizer() snake_case_ : str = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE ) snake_case_ : Union[str, Any] = 'lower newer' # Testing tokenization snake_case_ : Optional[Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) snake_case_ : str = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 
Testing conversion to ids without special tokens snake_case_ : List[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) snake_case_ : Optional[Any] = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Testing conversion to ids with special tokens snake_case_ : Dict = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE ) snake_case_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) snake_case_ : Any = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Testing the unknown token snake_case_ : Optional[Any] = tokens + [rust_tokenizer.unk_token] snake_case_ : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Dict: """simple docstring""" pass def UpperCAmelCase_ ( self : str , _A : str=15 ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # Simple input snake_case_ : int = 'This is a simple input' snake_case_ : List[str] = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ : Any = ('This is a simple input', 'This is a pair') snake_case_ : int = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='max_length' ) # Simple input self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='max_length' ) # Simple input self.assertRaises( __SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='max_length' , ) # Pair input self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='max_length' ) # Pair input self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='max_length' ) # Pair input self.assertRaises( __SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='max_length' , ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" snake_case_ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input snake_case_ : List[str] = 'This is a simple input' snake_case_ : Optional[int] = ['This is a simple input looooooooong', 'This is a simple input'] snake_case_ : Dict = ('This is a simple input', 'This is a pair') snake_case_ : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] snake_case_ : Dict = tokenizer.pad_token_id snake_case_ : Optional[int] = tokenizer(__SCREAMING_SNAKE_CASE , padding='max_length' , 
max_length=30 , return_tensors='np' ) snake_case_ : str = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors='np' ) snake_case_ : int = tokenizer(*__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=60 , return_tensors='np' ) snake_case_ : Dict = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: """simple docstring""" snake_case_ : Optional[Any] = '$$$' snake_case_ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE ) snake_case_ : Tuple = 'This is a simple input' snake_case_ : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ : int = tokenizer.bos_token_id snake_case_ : List[str] = tokenizer(__SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = tokenizer(__SCREAMING_SNAKE_CASE ) self.assertEqual(out_s.input_ids[0] , __SCREAMING_SNAKE_CASE ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case_ : List[Any] = tokenizer.decode(out_s.input_ids ) snake_case_ : Dict = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __SCREAMING_SNAKE_CASE ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCAmelCase_ ( self : Tuple ) -> str: """simple docstring""" snake_case_ : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) snake_case_ : Any = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' snake_case_ : int = '\nif len_a > len_b: result = a\nelse: result = b' snake_case_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE ) snake_case_ : int = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] snake_case_ : List[Any] = tokenizer.decode(__SCREAMING_SNAKE_CASE , truncate_before_pattern=__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass
327
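A small sketch mirroring what the slow test above exercises, using the Salesforce/codegen-350M-mono checkpoint it names; the completion string and patterns are taken from that test.

# Decode with truncate_before_pattern to strip trailing comment noise.
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
ids = tokenizer.encode(text)
print(tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))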
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = (DDPMParallelScheduler,) def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' __a = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE) return config def _lowerCamelCase ( self : List[str]): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = self.dummy_sample_deter + 0.1 __a = self.dummy_sample_deter - 0.1 __a = samplea.shape[0] __a = torch.stack([samplea, samplea, samplea] , dim=0) __a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE) __a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) __a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 11_53.18_33) < 1E-2 assert abs(result_mean.item() - 0.50_05) < 1E-3 def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = 
self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_58.96_06) < 1E-2 assert abs(result_mean.item() - 0.33_72) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(prediction_type='''v_prediction''') __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_02.02_96) < 1E-2 assert abs(result_mean.item() - 0.26_31) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) __a = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE): if i == len(__SCREAMING_SNAKE_CASE) - 1: __a = -1 else: __a = timesteps[i + 1] __a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE) __a = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] __a = len(__SCREAMING_SNAKE_CASE) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
49
0
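A hedged sketch of driving diffusers' DDPMParallelScheduler outside the test harness; the constructor arguments follow the test config above, and the tensor shapes are illustrative.

# Take one reverse-diffusion step with random stand-ins for the model output.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
sample = torch.randn(1, 3, 32, 32)      # current noisy sample x_t
noise_pred = torch.randn(1, 3, 32, 32)  # stand-in for the model's predicted noise
out = scheduler.step(noise_pred, 999, sample, generator=torch.manual_seed(0))
print(out.prev_sample.shape)  # torch.Size([1, 3, 32, 32])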
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __A : Union[str, Any] = logging.getLogger() def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = '''\n'''.join(_UpperCAmelCase ) Path(_UpperCAmelCase ).open('''w''' ).writelines(_UpperCAmelCase ) __A : Tuple = '''patrickvonplaten/t5-tiny-random''' __A : List[Any] = '''sshleifer/bart-tiny-random''' __A : int = '''sshleifer/tiny-mbart''' __A : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _a ( __UpperCAmelCase): """simple docstring""" def lowercase__ ( self : str , __UpperCamelCase : Dict )->Any: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _UpperCAmelCase = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _UpperCAmelCase = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) _UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _UpperCAmelCase = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ): run_generate() assert Path(__SCREAMING_SNAKE_CASE ).exists() # os.remove(Path(output_file_name)) def lowercase__ ( self : Tuple )->str: self.run_eval_tester(__SCREAMING_SNAKE_CASE ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict )->List[Any]: self.run_eval_tester(__SCREAMING_SNAKE_CASE ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowercase__ ( self : str , __UpperCamelCase : Any )->Union[str, Any]: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _UpperCAmelCase = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _UpperCAmelCase = { '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / '''scores.json''' ) _UpperCAmelCase = str(tmp_dir / '''val.target''' ) _dump_articles(__SCREAMING_SNAKE_CASE , text['''en'''] ) _dump_articles(__SCREAMING_SNAKE_CASE , text['''de'''] ) _UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _UpperCAmelCase = F'\n run_eval_search.py\n {model}\n {str(__SCREAMING_SNAKE_CASE )}\n {str(__SCREAMING_SNAKE_CASE )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , 
__SCREAMING_SNAKE_CASE ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [''' num_beams | length_penalty''', model, '''Best score args'''] _UpperCAmelCase = ['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(__SCREAMING_SNAKE_CASE ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(__SCREAMING_SNAKE_CASE ).exists() os.remove(Path(__SCREAMING_SNAKE_CASE ) )
260
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __snake_case :List[Any] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _A ( __UpperCAmelCase ): def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) requires_backends(self , '''vision''') requires_backends(self , '''torch''') if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.') self.check_model_type(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = {} __a = {} __a = {} # preprocess args if "points_per_batch" in kwargs: __a = kwargs['''points_per_batch'''] if "points_per_crop" in kwargs: __a = kwargs['''points_per_crop'''] if "crops_n_layers" in kwargs: __a = kwargs['''crops_n_layers'''] if "crop_overlap_ratio" in kwargs: __a = kwargs['''crop_overlap_ratio'''] if "crop_n_points_downscale_factor" in kwargs: __a = kwargs['''crop_n_points_downscale_factor'''] # postprocess args if "pred_iou_thresh" in kwargs: __a = kwargs['''pred_iou_thresh'''] if "stability_score_offset" in kwargs: __a = kwargs['''stability_score_offset'''] if "mask_threshold" in kwargs: __a = kwargs['''mask_threshold'''] if "stability_score_thresh" in kwargs: __a = kwargs['''stability_score_thresh'''] if "crops_nms_thresh" in kwargs: __a = kwargs['''crops_nms_thresh'''] if "output_rle_mask" in kwargs: __a = kwargs['''output_rle_mask'''] if "output_bboxes_mask" in kwargs: __a = kwargs['''output_bboxes_mask'''] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ): '''simple docstring''' __a = load_image(__SCREAMING_SNAKE_CASE) __a = self.image_processor.size['''longest_edge'''] __a , __a , __a , __a = self.image_processor.generate_crop_boxes( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') with self.device_placement(): if self.framework == "pt": __a = self.get_inference_context() with inference_context(): __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''')) __a = image_embeddings __a = grid_points.shape[1] __a = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( '''Cannot have points_per_batch<=0. 
Must be >=1 to returned batched outputs. ''' '''To return all points at once, set points_per_batch to None''') for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = grid_points[:, i : i + points_per_batch, :, :] __a = input_labels[:, i : i + points_per_batch] __a = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ): '''simple docstring''' __a = model_inputs.pop('''input_boxes''') __a = model_inputs.pop('''is_last''') __a = model_inputs.pop('''original_sizes''').tolist() __a = model_inputs.pop('''reshaped_input_sizes''').tolist() __a = self.model(**__SCREAMING_SNAKE_CASE) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __a = model_outputs['''pred_masks'''] __a = self.image_processor.post_process_masks( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE) __a = model_outputs['''iou_scores'''] __a , __a , __a = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ): '''simple docstring''' __a = [] __a = [] __a = [] for model_output in model_outputs: all_scores.append(model_output.pop('''iou_scores''')) all_masks.extend(model_output.pop('''masks''')) all_boxes.append(model_output.pop('''boxes''')) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a , __a , __a , __a = self.image_processor.post_process_for_mask_generation( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = defaultdict(__SCREAMING_SNAKE_CASE) for output in model_outputs: for k, v in output.items(): extra[k].append(__SCREAMING_SNAKE_CASE) __a = {} if output_rle_mask: __a = rle_mask if output_bboxes_mask: __a = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
49
0
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
184
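A quick usage example for the circular queue above (names follow the cleaned-up class).

# FIFO behaviour with wrap-around storage; enqueue returns self, so calls chain.
q = CircularQueue(3)
q.enqueue("a").enqueue("b")
print(len(q))        # 2
print(q.first())     # a
print(q.dequeue())   # a
print(q.dequeue())   # b
print(q.is_empty())  # True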
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __snake_case :str = logging.get_logger(__name__) __snake_case :int = {'''vocab_file''': '''vocab.txt'''} __snake_case :List[Any] = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } __snake_case :List[str] = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } __snake_case :Optional[int] = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : int = ConvBertTokenizer def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , __SCREAMING_SNAKE_CASE : int="[SEP]" , __SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __a = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE) != do_lower_case or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE) != tokenize_chinese_chars ): __a = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''')) __a = do_lower_case __a = strip_accents __a = tokenize_chinese_chars __a = normalizer_class(**__SCREAMING_SNAKE_CASE) __a = do_lower_case def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=None): '''simple docstring''' __a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None): '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCamelCase ( 
self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None): '''simple docstring''' __a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE) return tuple(__SCREAMING_SNAKE_CASE)
49
0
"""simple docstring""" import socket def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Tuple = socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) SCREAMING_SNAKE_CASE__ : Any = socket.gethostname() SCREAMING_SNAKE_CASE__ : List[Any] = 12_312 sock.connect((host, port) ) sock.send(b"""Hello server!""" ) with open("""Received_file""" ,"""wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: SCREAMING_SNAKE_CASE__ : List[Any] = sock.recv(1_024 ) if not data: break out_file.write(_UpperCAmelCase ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
25
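A hedged sketch of a matching server for the client above: it listens on the same hostname/port, reads the greeting, and streams a file back. The function name and served file are assumptions for illustration.

# Hypothetical counterpart server; file_to_send.bin is a placeholder path.
import socket


def serve_file(path="file_to_send.bin", port=12_312):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((socket.gethostname(), port))
    srv.listen(1)
    conn, _ = srv.accept()
    print(conn.recv(1_024))  # the client's b"Hello server!" greeting
    with open(path, "rb") as f:
        while chunk := f.read(1_024):
            conn.send(chunk)
    conn.close()
    srv.close()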
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": []},
    "b1": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16]},
    "b2": {"hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16]},
    "b3": {"hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18]},
    "b4": {"hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6]},
    "b5": {"hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27]},
    "b6": {"hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31]},
    "b7": {"hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18]},
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    # TF names blocks block1a, block2a, block2b, ...; collect the block suffixes
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    original_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(original_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
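# Added illustration (not part of the original script): the crux of replace_params
# is axis order. TF stores conv kernels as (H, W, C_in, C_out) and depthwise
# kernels as (H, W, C, depth_multiplier), while PyTorch expects (C_out, C_in, H, W)
# and (C, multiplier, H, W). The dummy shapes are arbitrary, chosen only to keep
# the check self-contained; it reuses the numpy/torch imports above.
# >>> torch.from_numpy(np.zeros((3, 3, 32, 64), dtype=np.float32)).permute(3, 2, 0, 1).shape
# torch.Size([64, 32, 3, 3])
# >>> torch.from_numpy(np.zeros((3, 3, 32, 1), dtype=np.float32)).permute(2, 3, 0, 1).shape
# torch.Size([32, 1, 3, 3])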
49
0
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # mark odd composites; even numbers above 2 are never appended below
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
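# Added worked example: per the Project Euler 50 statement, below one hundred the
# longest sum of consecutive primes that is itself prime is 41 = 2+3+5+7+11+13.
# >>> prime_sieve(10)
# [2, 3, 5, 7]
# >>> solution(100)
# 41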
58
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
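# Added illustration with a made-up file list: each check above flags one class of
# offender, namely uppercase letters, spaces, hyphens, or a file at the repo root.
# >>> sample = ["maths/prime_check.py", "Maths/Prime Check.py", "graphs/a-star.py", "README.md"]
# >>> [f for f in sample if f != f.lower()]
# ['Maths/Prime Check.py', 'README.md']
# >>> [f for f in sample if " " in f]
# ['Maths/Prime Check.py']
# >>> [f for f in sample if "-" in f]
# ['graphs/a-star.py']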
49
0
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
163
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but are
    arranged differently (ignoring the case).
    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
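# Added note: collections.Counter expresses the same frequency comparison as the
# defaultdict loop above in a single line.
# >>> from collections import Counter
# >>> Counter("silent") == Counter("listen")
# True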
49
0
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCamelCase ( __UpperCAmelCase): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase="None" , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = relative_attention _UpperCAmelCase = position_biased_input _UpperCAmelCase = pos_att_type _UpperCAmelCase = scope def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self ): """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.get_config() _UpperCAmelCase = 300 return config def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = DebertaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self ): """simple 
docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = DebertaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCamelCase ( self ): """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase): """simple docstring""" @unittest.skip(reason='Model not available yet' ) def UpperCamelCase ( self ): """simple docstring""" pass @slow def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = DebertaModel.from_pretrained('microsoft/deberta-base' ) _UpperCAmelCase = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. 
_UpperCAmelCase = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
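# Added usage sketch (not part of the test file): the integration test above
# reduces to this inference pattern, using the same model id, inputs, and a 1e-4
# tolerance; a model download is required, hence it is left commented out.
# model = DebertaModel.from_pretrained("microsoft/deberta-base")
# input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
# attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
# with torch.no_grad():
#     last_hidden = model(input_ids, attention_mask=attention_mask)[0]
# print(last_hidden[:, 1:4, 1:4])  # compare against the expected slice above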
39
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra hyper-parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
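# Added usage sketch: the three pruning-specific fields ride on top of the
# standard BERT hyper-parameters; the values below are the declared defaults.
if __name__ == "__main__":
    demo_config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(demo_config.model_type, demo_config.pruning_method, demo_config.hidden_size)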
49
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowerCAmelCase__ = 
_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
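# Added note: with the _LazyModule registered in sys.modules above, a statement
# such as
#     from transformers.models.roberta import RobertaConfig
# imports configuration_roberta only at first attribute access, which keeps the
# top-level `import transformers` cheap when optional backends are missing.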
153
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
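# Added usage sketch: copy() deep-copies every field, so mutating a dict-valued
# option on the clone leaves the original untouched.
if __name__ == "__main__":
    cfg = DownloadConfig(max_retries=3, storage_options={"anon": True})
    clone = cfg.copy()
    clone.storage_options["anon"] = False
    assert cfg.storage_options == {"anon": True}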
49
0
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=24 , _snake_case=2 , _snake_case=6 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=None , _snake_case=1_000 , ) -> int: '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = use_token_type_ids __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = type_sequence_label_size __a = initializer_range __a = num_labels __a = scope __a = range_bbox def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a = bbox[i, j, 3] __a = bbox[i, j, 1] __a = t if bbox[i, j, 2] < bbox[i, j, 0]: __a = bbox[i, j, 2] __a = bbox[i, j, 0] __a = t __a = None if self.use_input_mask: __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a = None if self.use_token_type_ids: __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a = None __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Dict: '''simple docstring''' __a = LiltModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __a = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __a = model(__SCREAMING_SNAKE_CASE , 
bbox=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __a = model(__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Union[str, Any]: '''simple docstring''' __a = self.num_labels __a = LiltForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __a = model( __SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Optional[int]: '''simple docstring''' __a = LiltForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __a = model( __SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = config_and_inputs __a = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __A( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): snake_case_ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case_ = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> int: '''simple docstring''' return True def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = LiltModelTester(self ) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = LiltModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @slow class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__SCREAMING_SNAKE_CASE ) __a = torch.tensor([[1, 2]] , device=__SCREAMING_SNAKE_CASE ) __a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __a = model(input_ids=__SCREAMING_SNAKE_CASE , bbox=__SCREAMING_SNAKE_CASE ) __a = torch.Size([1, 2, 768] ) __a = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__SCREAMING_SNAKE_CASE , ) self.assertTrue(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
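# Added usage sketch mirroring the integration test above: LiLT consumes token
# ids plus one (x0, y0, x1, y1) bounding box per token; expected hidden size is
# 768 for this checkpoint. A model download is required, hence commented out.
# model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
# input_ids = torch.tensor([[1, 2]])
# bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
# with torch.no_grad():
#     outputs = model(input_ids=input_ids, bbox=bbox)
# print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])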
6
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
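# Added worked example: with the defaults above (num_layers=12,
# num_sparse_encoder_layers=3), a sparse MoE block replaces the dense FFN in
# every 12 // 3 = 4th layer on both the encoder and the decoder side.
# >>> SwitchTransformersConfig().encoder_sparse_step
# 4
# >>> SwitchTransformersConfig().decoder_sparse_step
# 4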
49
0
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Tuple: """simple docstring""" a__ : List[str] = jnp.ones((batch_size, length) ) / length return scores def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" a__ : Tuple = None a__ : str = 2_0 a__ : Tuple = self._get_uniform_logits(batch_size=2 , length=__SCREAMING_SNAKE_CASE ) # tweak scores to not be uniform anymore a__ : List[str] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch a__ : Any = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax a__ : str = jax.nn.softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) a__ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 ) a__ : List[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 ) a__ : Tuple = jax.nn.softmax(temp_dist_warper_sharper(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 ) a__ : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Optional[Any] = None a__ : Dict = 1_0 a__ : List[Any] = 2 # create ramp distribution a__ : Any = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() a__ : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size a__ : int = FlaxTopKLogitsWarper(3 ) a__ : Any = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case a__ : int = 5 a__ : Union[str, Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) a__ : List[Any] = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy() a__ : List[str] = top_k_warp_safety_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Optional[int] = None a__ : Any = 1_0 a__ : Any = 2 # create distribution and 
take log (inverse to Softmax as taken in TopPLogitsWarper) a__ : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) ) a__ : Optional[Any] = FlaxTopPLogitsWarper(0.8 ) a__ : Optional[Any] = np.exp(top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 a__ : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] ) self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # check edge cases with negative and extreme logits a__ : str = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme a__ : Optional[int] = ramp_logits[1] * 1_0_0.0 # make sure at least 2 tokens are kept a__ : List[str] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) a__ : Union[str, Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" a__ : int = 2_0 a__ : List[Any] = 4 a__ : List[Any] = 0 a__ : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__SCREAMING_SNAKE_CASE ) # check that min length is applied at length 5 a__ : Any = ids_tensor((batch_size, 2_0) , vocab_size=2_0 ) a__ : List[Any] = 5 a__ : Any = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : List[str] = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 a__ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Any = 1_5 a__ : Any = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Union[str, Any] = 2_0 a__ : List[Any] = 4 a__ : Optional[Any] = 0 a__ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE ) # check that all scores are -inf except the bos_token_id score a__ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=2_0 ) a__ : List[Any] = 1 a__ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Dict = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 a__ : List[str] = 3 a__ : Dict = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : int = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() ) def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : Optional[int] = 2_0 a__ : Union[str, Any] = 4 a__ : 
str = 0 a__ : Tuple = 5 a__ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) # check that all scores are -inf except the eos_token_id when max_length is reached a__ : List[str] = ids_tensor((batch_size, 4) , vocab_size=2_0 ) a__ : Optional[Any] = 4 a__ : List[Any] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Any = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached a__ : List[str] = 3 a__ : str = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : int = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() ) def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : Optional[Any] = 4 a__ : int = 1_0 a__ : Dict = 1_5 a__ : List[Any] = 2 a__ : int = 1 a__ : Optional[int] = 1_5 # dummy input_ids and scores a__ : Optional[int] = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE ) a__ : Tuple = input_ids.copy() a__ : Union[str, Any] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Any = scores.copy() # instantiate all dist processors a__ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 ) a__ : List[str] = FlaxTopKLogitsWarper(3 ) a__ : Union[str, Any] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors a__ : Tuple = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__SCREAMING_SNAKE_CASE ) a__ : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE ) a__ : int = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) a__ : List[str] = 1_0 # no processor list a__ : Dict = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Optional[int] = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : List[Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Optional[Any] = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Tuple = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Tuple = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # with processor list a__ : Union[str, Any] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) a__ : Tuple = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # scores should be equal self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : Optional[Any] = 4 a__ : Union[str, Any] = 1_0 a__ : Union[str, Any] = 1_5 a__ : Any = 2 a__ : Optional[Any] = 1 a__ : Tuple = 1_5 # dummy input_ids and scores a__ : int = ids_tensor((batch_size, sequence_length) , 
__SCREAMING_SNAKE_CASE ) a__ : Union[str, Any] = input_ids.copy() a__ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : List[str] = scores.copy() # instantiate all dist processors a__ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) a__ : Optional[int] = FlaxTopKLogitsWarper(3 ) a__ : List[str] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors a__ : int = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__SCREAMING_SNAKE_CASE ) a__ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE ) a__ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) a__ : Optional[Any] = 1_0 # no processor list def run_no_processor_list(__lowercase , __lowercase , __lowercase ): a__ : List[Any] = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Dict = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Optional[Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : Dict = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : List[str] = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) a__ : List[str] = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) return scores # with processor list def run_processor_list(__lowercase , __lowercase , __lowercase ): a__ : List[str] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) a__ : Union[str, Any] = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) return scores a__ : Dict = jax.jit(__SCREAMING_SNAKE_CASE ) a__ : Dict = jax.jit(__SCREAMING_SNAKE_CASE ) a__ : List[Any] = jitted_run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Union[str, Any] = jitted_run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # scores should be equal self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
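# Added recap of the contract the last two tests assert: wrapping the warpers and
# processors in a FlaxLogitsProcessorList and calling it once must match applying
# each one in order, with or without jax.jit. Sketch of the call pattern:
# processors = FlaxLogitsProcessorList(
#     [FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(3), FlaxTopPLogitsWarper(0.8)]
# )
# scores = processors(input_ids, scores, cur_len=cur_len)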
170
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
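# Added usage sketch (the worker count of 2 is an arbitrary choice): the Ray
# actors are created by the caller and handed to from_pretrained as actor_handles.
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )
# retriever.init_retrieval()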
49
0
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _A : Tuple = 16 _A : Tuple = 32 def UpperCamelCase_ ( snake_case_ : Tuple , snake_case_ : Dict = 16 , snake_case_ : List[str] = "bert-base-cased" ) -> Tuple: '''simple docstring''' __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) __lowerCAmelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case_ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowerCAmelCase = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_UpperCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case_ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_UpperCAmelCase , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return tokenizer.pad(_UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __lowerCAmelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) __lowerCAmelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader def UpperCamelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Dict ) -> int: '''simple docstring''' model.eval() __lowerCAmelCase = 0 for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**_UpperCAmelCase ) __lowerCAmelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __lowerCAmelCase , __lowerCAmelCase = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_UpperCAmelCase ) - 1: __lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) __lowerCAmelCase = metric.compute() return eval_metric["accuracy"] def UpperCamelCase_ ( snake_case_ : Any , snake_case_ : Dict ) -> Tuple: '''simple docstring''' __lowerCAmelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config["""lr"""] __lowerCAmelCase = int(config["""num_epochs"""] ) __lowerCAmelCase = int(config["""seed"""] ) __lowerCAmelCase = int(config["""batch_size"""] ) __lowerCAmelCase = args.model_name_or_path set_seed(_UpperCAmelCase ) __lowerCAmelCase , __lowerCAmelCase = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase , return_dict=_UpperCAmelCase ) # Instantiate optimizer __lowerCAmelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=_UpperCAmelCase ) if accelerator.state.deepspeed_plugin is not None: __lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: __lowerCAmelCase = 1 __lowerCAmelCase = (len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=0 , num_training_steps=_UpperCAmelCase , ) else: __lowerCAmelCase = DummyScheduler(_UpperCAmelCase , total_num_steps=_UpperCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # We need to keep track of how many total steps we have iterated over __lowerCAmelCase = 0 # We also need to keep track of the stating epoch so files are named properly __lowerCAmelCase = 0 __lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" ) __lowerCAmelCase = num_epochs if args.partial_train_epoch is not None: __lowerCAmelCase = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __lowerCAmelCase = args.resume_from_checkpoint.split("""epoch_""" )[1] __lowerCAmelCase = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __lowerCAmelCase = int(_UpperCAmelCase ) + 1 __lowerCAmelCase = evaluation_loop(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) accelerator.print("""resumed checkpoint performance:""" , _UpperCAmelCase ) accelerator.print("""resumed checkpoint\'s scheduler\'s lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers\'s lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f: __lowerCAmelCase = json.load(_UpperCAmelCase ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __lowerCAmelCase = {} for epoch in range(_UpperCAmelCase , _UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): __lowerCAmelCase = model(**_UpperCAmelCase ) __lowerCAmelCase = outputs.loss __lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_UpperCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __lowerCAmelCase = f"""epoch_{epoch}""" __lowerCAmelCase = os.path.join(args.output_dir , _UpperCAmelCase ) accelerator.save_state(_UpperCAmelCase ) __lowerCAmelCase = evaluation_loop(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowerCAmelCase = accuracy __lowerCAmelCase = lr_scheduler.get_lr()[0] __lowerCAmelCase = optimizer.param_groups[0]["""lr"""] __lowerCAmelCase = epoch __lowerCAmelCase = overall_step accelerator.print(f"""epoch {epoch}:""" , _UpperCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase_ ( ) -> List[Any]: '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=_UpperCAmelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_UpperCAmelCase , ) parser.add_argument( """--output_dir""" , type=_UpperCAmelCase , default=""".""" , help="""Optional save directory where all 
checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=_UpperCAmelCase , default=2 , help="""Number of train epochs.""" , ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
229
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
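A minimal usage sketch for the conversion function above; all paths are made-up placeholders, not from the source:

# Hypothetical direct call with placeholder paths.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="bigbird/model.ckpt",
    big_bird_config_file="bigbird/config.json",
    pytorch_dump_path="bigbird-pytorch",
    is_trivia_qa=False,
)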
49
0
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
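A quick sanity check of the sliding-window construction above on synthetic data; the toy series and printed shape are illustrative only, not from the source:

# Windowing a toy series of length 100 the same way as above yields
# 100 - forward_days - look_back + 1 = 86 windows of shape (10, 1).
import numpy as np

series = np.arange(100, dtype="float32").reshape(-1, 1)
look_back, forward_days = 10, 5
x = np.array([series[i : i + look_back] for i in range(len(series) - forward_days - look_back + 1)])
print(x.shape)  # (86, 10, 1)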
327
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase ) else: __a = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase ) for i, tensor in enumerate(_UpperCAmelCase ): if padding_side == "right": if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = tensor[:sequence_length] else: __a = tensor[:sequence_length] else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = tensor[:sequence_length] else: __a = tensor[:sequence_length] return out_tensor.tolist() def __snake_case ( _UpperCAmelCase ): __a = ord(_UpperCAmelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __a = unicodedata.category(_UpperCAmelCase ) if cat.startswith('''P''' ): return True return False @dataclass class _A ( __UpperCAmelCase ): UpperCamelCase__ : PreTrainedTokenizerBase UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : int = -100 UpperCamelCase__ : str = "pt" def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' import torch __a = '''label''' if '''label''' in features[0].keys() else '''labels''' __a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __a = self.tokenizer.pad( __SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __a = torch.tensor(batch['''entity_ids''']).shape[1] __a = self.tokenizer.padding_side if padding_side == "right": __a = [ list(__SCREAMING_SNAKE_CASE) + [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) for label in labels ] else: __a = [ [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) + list(__SCREAMING_SNAKE_CASE) for label in labels ] __a = [feature['''ner_tags'''] for feature in features] __a = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = [feature['''original_entity_spans'''] for feature in features] __a = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = {k: torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.intaa) for k, v in batch.items()} return batch
49
0
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __A : List[Any] = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s __A : List[str] = 3e8 # unit of c : m * s^-1 def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if force < 0: raise ValueError('''Magnitude of force can not be negative''' ) if distance < 0: raise ValueError('''Distance can not be negative''' ) if area < 0: raise ValueError('''Area can not be negative''' ) if force == 0: _UpperCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: _UpperCAmelCase = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: _UpperCAmelCase = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('''One and only one argument must be 0''' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
260
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
49
0
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
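A brute-force cross-check for the closed-form count above; the helper below is an illustrative sketch, not part of the source:

# Enumerate hollow square laminae directly: outer side `outer`, hole side
# `hole` of the same parity, counting pairs with outer**2 - hole**2 <= limit.
def brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest possible lamina for this outer size
        hole = outer - 2
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


for n in (8, 100, 1000):
    assert brute_force(n) == solution(n)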
184
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 
'unet/diffusion_pytorch_model.fp16.safetensors', ] __a = '''fp16''' self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
49
0
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class lowerCAmelCase_ (nn.Module ): """simple docstring""" __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = hidden_states.shape SCREAMING_SNAKE_CASE__ : Optional[int] = jax.image.resize( __SCREAMING_SNAKE_CASE , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , ) SCREAMING_SNAKE_CASE__ : List[str] = self.conv(__SCREAMING_SNAKE_CASE ) return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.conv(__SCREAMING_SNAKE_CASE ) return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" __UpperCamelCase : int __UpperCamelCase : int = None __UpperCamelCase : float = 0.0 __UpperCamelCase : bool = None __UpperCamelCase : jnp.dtype = jnp.floataa def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.in_channels if self.out_channels is None else self.out_channels SCREAMING_SNAKE_CASE__ : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) SCREAMING_SNAKE_CASE__ : Tuple = nn.Conv( __SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Dense(__SCREAMING_SNAKE_CASE , dtype=self.dtype ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) SCREAMING_SNAKE_CASE__ : str = nn.Dropout(self.dropout_prob ) SCREAMING_SNAKE_CASE__ : int = nn.Conv( __SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) SCREAMING_SNAKE_CASE__ : Any = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut SCREAMING_SNAKE_CASE__ : Any = None if use_nin_shortcut: SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Conv( __SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , ) def __call__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = hidden_states SCREAMING_SNAKE_CASE__ : Dict = self.norma(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Dict = nn.swish(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.conva(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Tuple = self.time_emb_proj(nn.swish(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE__ : List[str] = jnp.expand_dims(jnp.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , 1 ) SCREAMING_SNAKE_CASE__ : Dict = hidden_states + temb SCREAMING_SNAKE_CASE__ : Tuple = self.norma(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : Tuple = nn.swish(__SCREAMING_SNAKE_CASE ) 
SCREAMING_SNAKE_CASE__ : List[Any] = self.dropout(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[str] = self.conva(__SCREAMING_SNAKE_CASE ) if self.conv_shortcut is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.conv_shortcut(__SCREAMING_SNAKE_CASE ) return hidden_states + residual
25
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __snake_case :Dict = '''bart''' __snake_case :Tuple = True @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __a = qar_model.eval() else: __a , __a = (None, None) if MODEL_TYPE == "bart": __a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __a = sas_model.eval() else: __a , __a = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = faiss.StandardGpuResources() __a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __a = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __a = faiss.IndexFlatIP(128 ) __a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase ) wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU else: __a , __a = (None, None) __a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): __a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __a = elia['''train_eli5'''] __a = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __a = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_UpperCAmelCase ) return (elia_train, eli5_train_q_index) __snake_case ,__snake_case ,__snake_case :List[str] = load_indexes() __snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models() __snake_case ,__snake_case :Tuple = load_train_data() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ): __a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase ) __a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase ) __a = [elia_train[int(_UpperCAmelCase )] for i in I[0]] return nn_examples def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ): if source == "none": __a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a = query_qa_dense_index( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: __a , __a = query_es_index( _UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , ) __a = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __a = '''question: {} context: 
{}'''.format(_UpperCAmelCase , _UpperCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None), } ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ): with torch.no_grad(): __a = qa_sas_generate( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('''Long Form Question Answering with ELI5''') # Start sidebar __snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>''' __snake_case :int = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __snake_case :int = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. ''' st.sidebar.markdown(description, unsafe_allow_html=True) __snake_case :Union[str, Any] = [ '''Answer the question''', '''View the retrieved document only''', '''View the most similar ELI5 question and answer''', '''Show me everything, please!''', ] __snake_case :int = st.sidebar.checkbox('''Demo options''') if demo_options: __snake_case :str = st.sidebar.selectbox( '''''', action_list, index=3, ) __snake_case :Tuple = action_list.index(action_st) __snake_case :Optional[int] = st.sidebar.selectbox( '''''', ['''Show full text of passages''', '''Show passage section titles'''], index=0, ) __snake_case :Dict = show_type == '''Show full text of passages''' else: __snake_case :Dict = 3 __snake_case :str = True __snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''') if retrieval_options: __snake_case :List[str] = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
''' st.sidebar.markdown(retriever_info) __snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none''']) __snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed''']) else: __snake_case :Optional[int] = '''wiki40b''' __snake_case :Dict = '''dense''' __snake_case :Dict = '''beam''' __snake_case :int = 2 __snake_case :str = 64 __snake_case :Tuple = 256 __snake_case :int = None __snake_case :List[Any] = None __snake_case :int = st.sidebar.checkbox('''Generation options''') if generate_options: __snake_case :Tuple = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. ''' st.sidebar.markdown(generate_info) __snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled''']) __snake_case :Dict = st.sidebar.slider( '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __snake_case :Dict = st.sidebar.slider( '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __snake_case :Tuple = st.sidebar.slider( '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) __snake_case :Any = st.sidebar.slider( '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) __snake_case :Any = None # start main text __snake_case :Dict = [ '''<MY QUESTION>''', '''How do people make chocolate?''', '''Why do we get a fever when we are sick?''', '''How can different animals perceive different colors?''', '''What is natural language processing?''', '''What\'s the best way to treat a sunburn?''', '''What exactly are vitamins ?''', '''How does nuclear energy provide electricity?''', '''What\'s the difference between viruses and bacteria?''', '''Why are flutes classified as woodwinds when most of them are made out of metal ?''', '''Why do people like drinking coffee even though it tastes so bad?''', '''What happens when wine ages? How does it make the wine taste better?''', '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''', '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''', '''How does New Zealand have so many large bird predators?''', ] __snake_case :int = st.selectbox( '''What would you like to ask? 
---- select <MY QUESTION> to enter a new query''', questions_list, index=1, ) if question_s == "<MY QUESTION>": __snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''') else: __snake_case :Optional[int] = question_s if st.button('''Show me!'''): if action in [0, 1, 3]: if index_type == "mixed": __snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10) __snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10) __snake_case :Optional[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __snake_case :Union[str, Any] = support_list[:10] __snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list]) else: __snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __snake_case ,__snake_case :Optional[int] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == '''sampled'''), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('''### The model generated answer is:''') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''') for i, res in enumerate(support_list): __snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_''')) __snake_case :int = res[1].strip() if sec_titles == "": __snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url) else: __snake_case :Optional[int] = sec_titles.split(''' & ''') __snake_case :str = ''' & '''.join( ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list] ) st.markdown( '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True ) if action in [2, 3]: __snake_case :str = find_nearest_training(question) __snake_case :str = nn_train_list[0] st.markdown( '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title''']) ) __snake_case :Optional[Any] = [ '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != ''''''])) for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score'''])) if i == 0 or sc > 2 ] st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st))) __snake_case :Tuple = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* ''' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
49
0
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
58
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _A ( __UpperCAmelCase ): def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = eval_examples __a = post_process_function def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = gen_kwargs.copy() __a = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) __a = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) __a = gen_kwargs __a = self.eval_dataset if eval_dataset is None else eval_dataset __a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE) __a = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) else: __a = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report()) __a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE) return metrics def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = gen_kwargs.copy() __a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE) # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''') __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
49
0
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowerCAmelCase :str = StableDiffusionDiffEditPipeline lowerCAmelCase :List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} lowerCAmelCase :List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} lowerCAmelCase :List[str] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCAmelCase :Any = frozenset([] ) def snake_case__ ( self): torch.manual_seed(0) UpperCAmelCase__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__SCREAMING_SNAKE_CASE , ) UpperCAmelCase__ : Dict = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) UpperCAmelCase__ : Tuple = DDIMInverseScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_zero=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) UpperCAmelCase__ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) UpperCAmelCase__ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase__ : Optional[Any] = CLIPTextModel(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") UpperCAmelCase__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=0): UpperCAmelCase__ : List[str] = floats_tensor((1, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE)).to(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE)).to(__SCREAMING_SNAKE_CASE) if 
str(__SCREAMING_SNAKE_CASE).startswith("""mps"""): UpperCAmelCase__ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE) else: UpperCAmelCase__ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Optional[Any] = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=0): UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE)).to(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1)[0] UpperCAmelCase__ : Any = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE)).convert("""RGB""") if str(__SCREAMING_SNAKE_CASE).startswith("""mps"""): UpperCAmelCase__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE) else: UpperCAmelCase__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=0): UpperCAmelCase__ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE)).to(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1)[0] UpperCAmelCase__ : int = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE)).convert("""RGB""") if str(__SCREAMING_SNAKE_CASE).startswith("""mps"""): UpperCAmelCase__ : str = torch.manual_seed(__SCREAMING_SNAKE_CASE) else: UpperCAmelCase__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def snake_case__ ( self): if not hasattr(self.pipeline_class , """_optional_components"""): return UpperCAmelCase__ : Optional[int] = self.get_dummy_components() UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE) pipe.to(__SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) UpperCAmelCase__ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : str = pipe(**__SCREAMING_SNAKE_CASE)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : str = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE) pipe_loaded.to(__SCREAMING_SNAKE_CASE) pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) for optional_component in pipe._optional_components: self.assertTrue( getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) is None , 
f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Optional[Any] = pipe_loaded(**__SCREAMING_SNAKE_CASE)[0] UpperCAmelCase__ : List[str] = np.abs(output - output_loaded).max() self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4) def snake_case__ ( self): UpperCAmelCase__ : Tuple = """cpu""" UpperCAmelCase__ : Optional[Any] = self.get_dummy_components() UpperCAmelCase__ : Optional[int] = self.pipeline_class(**__SCREAMING_SNAKE_CASE) pipe.to(__SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[Any] = self.get_dummy_mask_inputs(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[str] = pipe.generate_mask(**__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Optional[Any] = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16)) UpperCAmelCase__ : List[Any] = np.array([0] * 9) UpperCAmelCase__ : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3) self.assertEqual(mask[0, -3, -4] , 0) def snake_case__ ( self): UpperCAmelCase__ : Union[str, Any] = """cpu""" UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase__ : int = self.pipeline_class(**__SCREAMING_SNAKE_CASE) pipe.to(__SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Optional[int] = self.get_dummy_inversion_inputs(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Dict = pipe.invert(**__SCREAMING_SNAKE_CASE).images UpperCAmelCase__ : Any = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3)) UpperCAmelCase__ : Optional[int] = np.array( [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , ) UpperCAmelCase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3) def snake_case__ ( self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = """cpu""" UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : List[str] = {"""beta_start""": 0.00085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""} UpperCAmelCase__ : Optional[Any] = DPMSolverMultistepScheduler(**__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Any = DPMSolverMultistepInverseScheduler(**__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE) pipe.to(__SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Any = self.get_dummy_inversion_inputs(__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Tuple = pipe.invert(**__SCREAMING_SNAKE_CASE).images UpperCAmelCase__ : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3)) UpperCAmelCase__ : Optional[int] = np.array( [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , ) UpperCAmelCase__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3) @require_torch_gpu @slow class _snake_case ( unittest.TestCase ): def snake_case__ ( self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def snake_case__ ( cls): UpperCAmelCase__ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""") UpperCAmelCase__ : Dict = 
raw_image.convert("""RGB""").resize((768, 768)) UpperCAmelCase__ : str = raw_image def snake_case__ ( self): UpperCAmelCase__ : str = torch.manual_seed(0) UpperCAmelCase__ : Dict = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa) UpperCAmelCase__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config) UpperCAmelCase__ : Optional[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : Dict = """a bowl of fruit""" UpperCAmelCase__ : str = """a bowl of pears""" UpperCAmelCase__ : Optional[Any] = pipe.generate_mask( image=self.raw_image , source_prompt=__SCREAMING_SNAKE_CASE , target_prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , ) UpperCAmelCase__ : List[Any] = pipe.invert( prompt=__SCREAMING_SNAKE_CASE , image=self.raw_image , inpaint_strength=0.7 , generator=__SCREAMING_SNAKE_CASE).latents UpperCAmelCase__ : Tuple = pipe( prompt=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , image_latents=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase__ : List[Any] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""").resize((768, 768))) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1 def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = torch.manual_seed(0) UpperCAmelCase__ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa) UpperCAmelCase__ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) UpperCAmelCase__ : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE) UpperCAmelCase__ : List[Any] = """a bowl of fruit""" UpperCAmelCase__ : Any = """a bowl of pears""" UpperCAmelCase__ : str = pipe.generate_mask( image=self.raw_image , source_prompt=__SCREAMING_SNAKE_CASE , target_prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , ) UpperCAmelCase__ : Optional[Any] = pipe.invert( prompt=__SCREAMING_SNAKE_CASE , image=self.raw_image , inpaint_strength=0.7 , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=25 , ).latents UpperCAmelCase__ : Any = pipe( prompt=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , image_latents=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase__ : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""").resize((768, 768))) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero, matching C-style "/".
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
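# Hedged usage sketch for evaluate_postfix above; the expressions are my own.
# "5 1 2 + 4 * + 3 -" is the classic example: 1 + 2 = 3, 3 * 4 = 12,
# 5 + 12 = 17, 17 - 3 = 14.
assert evaluate_postfix(["2", "3", "+"]) == 5
assert evaluate_postfix(["5", "1", "2", "+", "4", "*", "+", "3", "-"]) == 14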
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
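# Hedged usage sketch: instantiate the config above with one override. The
# printed value comes from the `model_type` attribute defined on the class.
config = Data2VecTextConfig(num_hidden_layers=6)
print(config.model_type)         # "data2vec-text"
print(config.num_hidden_layers)  # 6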
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness is the number of positions where the candidate matches the target.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Single-point crossover: swap the tails of the two parents at a random cut.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}")
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class _lowerCamelCase ( __UpperCAmelCase ): UpperCAmelCase_ = ['''input_values''', '''padding_mask'''] def __init__(self , __a = 1 , __a = 2_40_00 , __a = 0.0 , __a = None , __a = None , **__a , ) -> List[Any]: super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase = chunk_length_s UpperCamelCase = overlap @property def snake_case_ (self ) -> Any: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def snake_case_ (self ) -> int: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__(self , __a , __a = None , __a = False , __a = None , __a = None , __a = None , ) -> List[Any]: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" F" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." 
) elif padding is None: # by default let's pad the inputs UpperCamelCase = True UpperCamelCase = bool( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCamelCase = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): UpperCamelCase = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): UpperCamelCase = raw_audio.astype(np.floataa ) # always return batch if not is_batched: UpperCamelCase = [np.asarray(__SCREAMING_SNAKE_CASE ).T] # verify inputs are valid for idx, example in enumerate(__SCREAMING_SNAKE_CASE ): if example.ndim > 2: raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" ) UpperCamelCase = None UpperCamelCase = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: UpperCamelCase = min(array.shape[0] for array in raw_audio ) UpperCamelCase = int(np.floor(max_length / self.chunk_stride ) ) UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: UpperCamelCase = max(array.shape[0] for array in raw_audio ) UpperCamelCase = int(np.ceil(max_length / self.chunk_stride ) ) UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length UpperCamelCase = "max_length" else: UpperCamelCase = input_values # normal padding on batch if padded_inputs is None: UpperCamelCase = self.pad( __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ) if padding: UpperCamelCase = padded_inputs.pop("attention_mask" ) UpperCamelCase = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: UpperCamelCase = example[..., None] input_values.append(example.T ) UpperCamelCase = input_values if return_tensors is not None: UpperCamelCase = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE ) return padded_inputs
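# Hedged, self-contained sketch (my own) of the chunk arithmetic the feature
# extractor above uses: chunk_length = chunk_length_s * sampling_rate, and
# chunk_stride keeps (1 - overlap) of each chunk. The values are illustrative.
sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.01

chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 23760 samples
print(chunk_length, chunk_stride)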
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
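# Hypothetical command line for the conversion script above; the script file
# name and all paths are placeholders of my own:
#
#   python convert_lxmert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf_ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin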
from math import pi


def arc_length(angle: int, radius: int) -> float:
    # Fraction of the full circumference subtended by `angle` degrees.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
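# Worked check (my own): arc_length(90, 10) is a quarter turn of a circle of
# radius 10, i.e. 2 * pi * 10 * (90 / 360) = 5 * pi ~ 15.708.
from math import isclose

assert isclose(arc_length(90, 10), 5 * pi)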
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowerCAmelCase_ ( _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Dict , _lowercase : List[str]) -> str: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase): a__ : Union[str, Any] = np.full((len(_UpperCAmelCase), sequence_length, 2) , _UpperCAmelCase) else: a__ : Optional[int] = np.full((len(_UpperCAmelCase), sequence_length) , _UpperCAmelCase) for i, tensor in enumerate(_UpperCAmelCase): if padding_side == "right": if isinstance(_UpperCAmelCase , _UpperCAmelCase): a__ : List[str] = tensor[:sequence_length] else: a__ : Optional[int] = tensor[:sequence_length] else: if isinstance(_UpperCAmelCase , _UpperCAmelCase): a__ : List[Any] = tensor[:sequence_length] else: a__ : int = tensor[:sequence_length] return out_tensor.tolist() def lowerCAmelCase_ ( _lowercase : str) -> Dict: """simple docstring""" a__ : int = ord(_UpperCAmelCase) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True a__ : Optional[int] = unicodedata.category(_UpperCAmelCase) if cat.startswith("""P"""): return True return False @dataclass class snake_case__ (__UpperCAmelCase ): """simple docstring""" __lowerCAmelCase :PreTrainedTokenizerBase __lowerCAmelCase :Union[bool, str, PaddingStrategy] = True __lowerCAmelCase :Optional[int] = None __lowerCAmelCase :Optional[int] = None __lowerCAmelCase :int = -100 __lowerCAmelCase :str = "pt" def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int: """simple docstring""" import torch a__ : Optional[Any] = """label""" if """label""" in features[0].keys() else """labels""" a__ : Optional[int] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None a__ : Optional[int] = self.tokenizer.pad( __SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , ) if labels is None: return batch a__ : List[Any] = torch.tensor(batch["""entity_ids"""] ).shape[1] a__ : Optional[Any] = self.tokenizer.padding_side if padding_side == "right": a__ : Tuple = [ list(__SCREAMING_SNAKE_CASE ) + [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE )) for label in labels ] else: a__ : int = [ [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE )) + list(__SCREAMING_SNAKE_CASE ) for label in labels ] a__ : Optional[int] = [feature["""ner_tags"""] for feature in features] a__ : str = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Optional[Any] = [feature["""original_entity_spans"""] for feature in features] a__ : Any = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a__ : Optional[int] = {k: torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.intaa ) for k, v in batch.items()} return batch
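# Hedged, self-contained sketch (my own) of the right-padding behaviour the
# helper at the top of the file implements: label rows shorter than
# sequence_length are padded with -100 (the usual "ignore" index).
import numpy as np

labels = [[1, 2, 3], [4]]
sequence_length, pad_id = 4, -100
out = np.full((len(labels), sequence_length), pad_id)
for i, row in enumerate(labels):
    out[i, : len(row)] = row[:sequence_length]
print(out.tolist())  # [[1, 2, 3, -100], [4, -100, -100, -100]]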
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
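# Hedged usage sketch: this reader is what backs load_dataset("text", ...) in
# the `datasets` library; the file path is a placeholder of my own.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(ds[0]["text"])  # first line of my_corpus.txt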
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' _SCREAMING_SNAKE_CASE : List[str] = CycleDiffusionPipeline _SCREAMING_SNAKE_CASE : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } _SCREAMING_SNAKE_CASE : Any = PipelineTesterMixin.required_optional_params - {'''latents'''} _SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} ) _SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS _SCREAMING_SNAKE_CASE : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def a ( self : Tuple ) -> List[Any]: torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __lowerCAmelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , num_train_timesteps=10_00 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __lowerCAmelCase = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict=0 ) -> List[str]: __lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = image / 2 + 0.5 if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, 
"""guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def a ( self : Dict ) -> Optional[int]: __lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __lowerCAmelCase = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def a ( self : List[Any] ) -> List[Any]: __lowerCAmelCase = self.get_dummy_components() for name, module in components.items(): if hasattr(__SCREAMING_SNAKE_CASE , """half""" ): __lowerCAmelCase = module.half() __lowerCAmelCase = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __lowerCAmelCase = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def a ( self : Union[str, Any] ) -> Optional[int]: return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def a ( self : Optional[Any] ) -> Union[str, Any]: return super().test_inference_batch_single_identical() @skip_mps def a ( self : Tuple ) -> Union[str, Any]: return super().test_dict_tuple_outputs_equivalent() @skip_mps def a ( self : List[Any] ) -> int: return super().test_save_load_optional_components() @skip_mps def a ( self : int ) -> str: return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): '''simple docstring''' def a ( self : List[str] ) -> List[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : List[Any] ) -> Tuple: __lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) __lowerCAmelCase = init_image.resize((5_12, 5_12) ) __lowerCAmelCase = """CompVis/stable-diffusion-v1-4""" __lowerCAmelCase = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder="""scheduler""" ) __lowerCAmelCase = CycleDiffusionPipeline.from_pretrained( __SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() __lowerCAmelCase = """A black colored car""" __lowerCAmelCase = """A blue colored car""" __lowerCAmelCase = 
torch.manual_seed(0 ) __lowerCAmelCase = pipe( prompt=__SCREAMING_SNAKE_CASE , source_prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __lowerCAmelCase = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def a ( self : Union[str, Any] ) -> int: __lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) __lowerCAmelCase = init_image.resize((5_12, 5_12) ) __lowerCAmelCase = """CompVis/stable-diffusion-v1-4""" __lowerCAmelCase = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder="""scheduler""" ) __lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) pipe.enable_attention_slicing() __lowerCAmelCase = """A black colored car""" __lowerCAmelCase = """A blue colored car""" __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = pipe( prompt=__SCREAMING_SNAKE_CASE , source_prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , ) __lowerCAmelCase = output.images assert np.abs(image - expected_image ).max() < 2e-2
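# --- Hedged usage sketch (my own, not part of the test file above) ---
# This mirrors the slow CycleDiffusion test: the model id, image URL, prompts,
# and argument values are all taken from the test itself. Running it downloads
# the SD 1.4 weights and is only practical on a GPU.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
pipe.to("cuda")
pipe.enable_attention_slicing()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

output = pipe(
    prompt="A blue colored car",          # target description
    source_prompt="A black colored car",  # description of the input image
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
    output_type="np",
)
image = output.images[0]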
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case :List[str] = '''\ Text data. Second line of data.''' __snake_case :Optional[Any] = '''file''' @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') __a = bytes(_UpperCAmelCase , '''utf-8''' ) with zstd.open(_UpperCAmelCase , '''wb''' ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def __snake_case ( _UpperCAmelCase ): with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} __a = input_paths[compression_format] __a = tmp_path / '''cache''' __a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: __a = f.read() with open(_UpperCAmelCase ) as f: __a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''custom_cache''' __a = '''custom_extracted_dir''' __a = tmp_path / '''custom_extracted_path''' if default_extracted: __a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) ) __a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __a = xz_file __a = ( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase ) ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path __a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path __a = '''./__missing_file__.txt''' with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = get_from_cache(f'tmp://{tmpfs_file}' ) with open(_UpperCAmelCase ) as f: __a = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( ): with pytest.raises(_UpperCAmelCase ): 
cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head('''s3://huggingface.co''' )
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
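# Hedged illustration (my own) of what the _LazyModule indirection buys:
# importing the parent package stays cheap because submodules are only
# imported on first attribute access. transformers applies the same trick at
# its top level, so the installed package itself demonstrates it:
import transformers

print(type(transformers))  # a _LazyModule instance, not a plain module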
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = (DDPMParallelScheduler,) def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' __a = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE) return config def _lowerCamelCase ( self : List[str]): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = self.dummy_sample_deter + 0.1 __a = self.dummy_sample_deter - 0.1 __a = samplea.shape[0] __a = torch.stack([samplea, samplea, samplea] , dim=0) __a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE) __a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) __a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 11_53.18_33) < 1E-2 assert abs(result_mean.item() - 0.50_05) < 1E-3 def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = 
self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_58.96_06) < 1E-2 assert abs(result_mean.item() - 0.33_72) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(prediction_type='''v_prediction''') __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_02.02_96) < 1E-2 assert abs(result_mean.item() - 0.26_31) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) __a = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE): if i == len(__SCREAMING_SNAKE_CASE) - 1: __a = -1 else: __a = timesteps[i + 1] __a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE) __a = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] __a = len(__SCREAMING_SNAKE_CASE) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __A : Optional[Any] = logging.get_logger(__name__) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : List[str]=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_UpperCAmelCase ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field( default=[] , metadata={ """help""": ( """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version""" """ of all available models""" ) } , ) UpperCamelCase__ = list_field( default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""}) UpperCamelCase__ = list_field( default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , ) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , ) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , ) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""}) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Use FP16 to accelerate inference."""}) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Benchmark training of model"""}) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Verbose memory tracing"""}) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , ) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={ """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory""" } , ) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Trace memory line by line"""}) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Save result to a CSV file"""}) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Save all print statements in a log file"""}) UpperCamelCase__ = field(default=__UpperCAmelCase , metadata={"""help""": """Whether to print environment information"""}) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={ """help""": ( """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use""" """ multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled""" """ for debugging / testing and on TPU.""" ) } , ) UpperCamelCase__ = field( default=F"inference_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , ) UpperCamelCase__ = field( default=F"inference_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , ) UpperCamelCase__ = field( default=F"train_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , ) UpperCamelCase__ = field( default=F"train_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , ) UpperCamelCase__ = field( default=F"env_info_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving environment information."""} , ) UpperCamelCase__ = field( default=F"log_{round(time())}.csv" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , ) UpperCamelCase__ = field(default=3 , metadata={"""help""": """Times an experiment will be run."""}) UpperCamelCase__ = field( default=__UpperCAmelCase , metadata={ """help""": ( """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain""" """ model weights.""" ) } , ) def lowercase__ ( self : List[str] )->Dict: warnings.warn( F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , __SCREAMING_SNAKE_CASE , ) def lowercase__ ( self : Tuple )->str: return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def lowercase__ ( self : str )->Any: if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def lowercase__ ( self : int )->List[str]: if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
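# Hedged usage sketch: in transformers, benchmark-argument dataclasses like
# the one above are consumed through PyTorchBenchmarkArguments and
# PyTorchBenchmark (both deprecated in recent releases); the model name and
# sizes are illustrative.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()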
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __snake_case :List[Any] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _A ( __UpperCAmelCase ): def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) requires_backends(self , '''vision''') requires_backends(self , '''torch''') if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.') self.check_model_type(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = {} __a = {} __a = {} # preprocess args if "points_per_batch" in kwargs: __a = kwargs['''points_per_batch'''] if "points_per_crop" in kwargs: __a = kwargs['''points_per_crop'''] if "crops_n_layers" in kwargs: __a = kwargs['''crops_n_layers'''] if "crop_overlap_ratio" in kwargs: __a = kwargs['''crop_overlap_ratio'''] if "crop_n_points_downscale_factor" in kwargs: __a = kwargs['''crop_n_points_downscale_factor'''] # postprocess args if "pred_iou_thresh" in kwargs: __a = kwargs['''pred_iou_thresh'''] if "stability_score_offset" in kwargs: __a = kwargs['''stability_score_offset'''] if "mask_threshold" in kwargs: __a = kwargs['''mask_threshold'''] if "stability_score_thresh" in kwargs: __a = kwargs['''stability_score_thresh'''] if "crops_nms_thresh" in kwargs: __a = kwargs['''crops_nms_thresh'''] if "output_rle_mask" in kwargs: __a = kwargs['''output_rle_mask'''] if "output_bboxes_mask" in kwargs: __a = kwargs['''output_bboxes_mask'''] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ): '''simple docstring''' __a = load_image(__SCREAMING_SNAKE_CASE) __a = self.image_processor.size['''longest_edge'''] __a , __a , __a , __a = self.image_processor.generate_crop_boxes( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') with self.device_placement(): if self.framework == "pt": __a = self.get_inference_context() with inference_context(): __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''')) __a = image_embeddings __a = grid_points.shape[1] __a = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( '''Cannot have points_per_batch<=0. 
Must be >=1 to returned batched outputs. ''' '''To return all points at once, set points_per_batch to None''') for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = grid_points[:, i : i + points_per_batch, :, :] __a = input_labels[:, i : i + points_per_batch] __a = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ): '''simple docstring''' __a = model_inputs.pop('''input_boxes''') __a = model_inputs.pop('''is_last''') __a = model_inputs.pop('''original_sizes''').tolist() __a = model_inputs.pop('''reshaped_input_sizes''').tolist() __a = self.model(**__SCREAMING_SNAKE_CASE) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __a = model_outputs['''pred_masks'''] __a = self.image_processor.post_process_masks( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE) __a = model_outputs['''iou_scores'''] __a , __a , __a = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ): '''simple docstring''' __a = [] __a = [] __a = [] for model_output in model_outputs: all_scores.append(model_output.pop('''iou_scores''')) all_masks.extend(model_output.pop('''masks''')) all_boxes.append(model_output.pop('''boxes''')) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a , __a , __a , __a = self.image_processor.post_process_for_mask_generation( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = defaultdict(__SCREAMING_SNAKE_CASE) for output in model_outputs: for k, v in output.items(): extra[k].append(__SCREAMING_SNAKE_CASE) __a = {} if output_rle_mask: __a = rle_mask if output_bboxes_mask: __a = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
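# Hedged usage sketch of the mask-generation pipeline above; the SAM
# checkpoint and the COCO image URL follow the pattern in the transformers
# documentation and are illustrative.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
)
print(len(outputs["masks"]), outputs["scores"][:3])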
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf

try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class _lowercase ( tf.keras.optimizers.schedules.LearningRateSchedule):
    """simple docstring"""

    def __init__( self : List[str] , __lowerCamelCase : float , __lowerCamelCase : Callable , __lowerCamelCase : int , __lowerCamelCase : float = 1.0 , __lowerCamelCase : str = None , ):
        '''simple docstring'''
        super().__init__()
        lowerCamelCase__ : Dict = initial_learning_rate
        lowerCamelCase__ : Any = warmup_steps
        lowerCamelCase__ : int = power
        lowerCamelCase__ : Any = decay_schedule_fn
        lowerCamelCase__ : Union[str, Any] = name

    def __call__( self : Union[str, Any] , __lowerCamelCase : List[str] ):
        '''simple docstring'''
        with tf.name_scope(self.name or "WarmUp" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            lowerCamelCase__ : Any = tf.cast(__SCREAMING_SNAKE_CASE , tf.floataa )
            lowerCamelCase__ : Tuple = tf.cast(self.warmup_steps , tf.floataa )
            lowerCamelCase__ : Any = global_step_float / warmup_steps_float
            lowerCamelCase__ : Dict = self.initial_learning_rate * tf.math.pow(__SCREAMING_SNAKE_CASE , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float ,
                lambda: warmup_learning_rate ,
                lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,
                name=__SCREAMING_SNAKE_CASE ,
            )

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def lowercase_ ( _A : int , _A : List[Any] , _A : Any , _A : int = 0.0 , _A : Optional[int] = 0.9 , _A : Tuple = 0.999 , _A : Optional[int] = 1E-8 , _A : List[Any] = None , _A : List[Any] = None , _A : Tuple = 0.0 , _A : Any = 1.0 , _A : Union[str, Any] = None , ):
    """simple docstring"""
    lowerCamelCase__ : Optional[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=_UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_UpperCAmelCase , )
    if num_warmup_steps:
        lowerCamelCase__ : Tuple = WarmUp(
            initial_learning_rate=_UpperCAmelCase , decay_schedule_fn=_UpperCAmelCase , warmup_steps=_UpperCAmelCase , )
    if weight_decay_rate > 0.0:
        lowerCamelCase__ : Dict = AdamWeightDecay(
            learning_rate=_UpperCAmelCase , weight_decay_rate=_UpperCAmelCase , beta_a=_UpperCAmelCase , beta_a=_UpperCAmelCase , epsilon=_UpperCAmelCase , clipnorm=_UpperCAmelCase , global_clipnorm=_UpperCAmelCase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=_UpperCAmelCase , )
    else:
        lowerCamelCase__ : Dict = tf.keras.optimizers.Adam(
            learning_rate=_UpperCAmelCase , beta_a=_UpperCAmelCase , beta_a=_UpperCAmelCase , epsilon=_UpperCAmelCase , clipnorm=_UpperCAmelCase , global_clipnorm=_UpperCAmelCase , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class _lowercase ( __UpperCAmelCase):
    """simple docstring"""

    def __init__( self : str , __lowerCamelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , __lowerCamelCase : float = 0.9 , __lowerCamelCase : float = 0.9_9_9 , __lowerCamelCase : float = 1E-7 , __lowerCamelCase : bool = False , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : str = "AdamWeightDecay" , **__lowerCamelCase : Tuple , ):
        '''simple docstring'''
        super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        lowerCamelCase__ : Dict = weight_decay_rate
        lowerCamelCase__ : List[Any] = include_in_weight_decay
        lowerCamelCase__ : List[Any] = exclude_from_weight_decay

    @classmethod
    def lowerCAmelCase ( cls : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = {"WarmUp": WarmUp}
        return super(__SCREAMING_SNAKE_CASE , cls ).from_config(__SCREAMING_SNAKE_CASE , custom_objects=__SCREAMING_SNAKE_CASE )

    def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
        '''simple docstring'''
        super(__SCREAMING_SNAKE_CASE , self )._prepare_local(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        lowerCamelCase__ : Optional[int] = tf.constant(
            self.weight_decay_rate , name="adam_weight_decay_rate" )

    def lowerCAmelCase ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
        '''simple docstring'''
        lowerCamelCase__ : Optional[Any] = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] ,
                use_locking=self._use_locking ,
            )
        return tf.no_op()

    def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        lowerCamelCase__ , lowerCamelCase__ : str = list(zip(*__SCREAMING_SNAKE_CASE ) )
        return super(__SCREAMING_SNAKE_CASE , self ).apply_gradients(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , name=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
        '''simple docstring'''
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        lowerCamelCase__ : str = apply_state or {}
        lowerCamelCase__ : Dict = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            lowerCamelCase__ : Optional[Any] = self._fallback_apply_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCamelCase__ : List[Any] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=None ):
        '''simple docstring'''
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE )
        lowerCamelCase__ : str = self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        with tf.control_dependencies([decay] ):
            return super(__SCREAMING_SNAKE_CASE , self )._resource_apply_dense(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=None ):
        '''simple docstring'''
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , __SCREAMING_SNAKE_CASE )
        lowerCamelCase__ : List[str] = self._decay_weights_op(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        with tf.control_dependencies([decay] ):
            return super(__SCREAMING_SNAKE_CASE , self )._resource_apply_sparse(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def lowerCAmelCase ( self : Tuple ):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate} )
        return config

    def lowerCAmelCase ( self : Any , __lowerCamelCase : Dict ):
        '''simple docstring'''
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is not None:
                    return False
        return True


class _lowercase ( __UpperCAmelCase):
    """simple docstring"""

    def __init__( self : Tuple ):
        '''simple docstring'''
        lowerCamelCase__ : str = []
        lowerCamelCase__ : Optional[Any] = None

    @property
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        if self._accum_steps is None:
            lowerCamelCase__ : Dict = tf.Variable(
                tf.constant(0 , dtype=tf.intaa ) ,
                trainable=__SCREAMING_SNAKE_CASE ,
                synchronization=tf.VariableSynchronization.ON_READ ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,
            )
        return self._accum_steps.value()

    @property
    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self : List[Any] , __lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        if not self._gradients:
            lowerCamelCase__ : List[Any] = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(__SCREAMING_SNAKE_CASE ) ,
                        trainable=__SCREAMING_SNAKE_CASE ,
                        synchronization=tf.VariableSynchronization.ON_READ ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(__SCREAMING_SNAKE_CASE ) != len(self._gradients ):
            raise ValueError(f"Expected {len(self._gradients )} gradients, but got {len(__SCREAMING_SNAKE_CASE )}" )

        for accum_gradient, gradient in zip(self._gradients , __SCREAMING_SNAKE_CASE ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(__SCREAMING_SNAKE_CASE )

        self._accum_steps.assign_add(1 )

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(__SCREAMING_SNAKE_CASE ) )
184
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


__snake_case :str = logging.get_logger(__name__)

__snake_case :int = {'''vocab_file''': '''vocab.txt'''}

__snake_case :List[Any] = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}

__snake_case :List[str] = {
    '''YituTech/conv-bert-base''': 512,
    '''YituTech/conv-bert-medium-small''': 512,
    '''YituTech/conv-bert-small''': 512,
}

__snake_case :Optional[int] = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
    UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
    UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : int = ConvBertTokenizer

    def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , __SCREAMING_SNAKE_CASE : int="[SEP]" , __SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ):
        '''simple docstring'''
        super().__init__(
            __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

        __a = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE) != do_lower_case
            or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE) != tokenize_chinese_chars
        ):
            __a = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type'''))
            __a = do_lower_case
            __a = strip_accents
            __a = tokenize_chinese_chars
            __a = normalizer_class(**__SCREAMING_SNAKE_CASE)

        __a = do_lower_case

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=None):
        '''simple docstring'''
        __a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
        '''simple docstring'''
        __a = [self.sep_token_id]
        __a = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
        '''simple docstring'''
        __a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
        return tuple(__SCREAMING_SNAKE_CASE)
49
0
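# A minimal, self-contained sketch of the warmup-then-decay learning-rate
# pattern implemented by the WarmUp-style schedule in the row above (requires
# tensorflow; the 5e-5 peak LR and the 100/1000 step counts are made up):
import tensorflow as tf

decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-5, decay_steps=900, end_learning_rate=0.0
)


def warmup_then_decay(step, init_lr=5e-5, warmup_steps=100.0):
    step = tf.cast(step, tf.float32)
    # Linear ramp for the first `warmup_steps` steps (power=1.0), then decay.
    return tf.where(step < warmup_steps, init_lr * step / warmup_steps, decay(step - warmup_steps))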
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=19 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : Any = seq_length SCREAMING_SNAKE_CASE__ : List[Any] = is_training SCREAMING_SNAKE_CASE__ : Any = use_input_mask SCREAMING_SNAKE_CASE__ : Tuple = use_token_type_ids SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : Tuple = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : int = num_labels SCREAMING_SNAKE_CASE__ : Any = num_choices SCREAMING_SNAKE_CASE__ : str = scope def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , ) return config def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float() model.to(__SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : int = model(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ : List[Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ (__UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Tuple = False __UpperCamelCase : Optional[Any] = (EsmForProteinFolding,) if is_torch_available() else () __UpperCamelCase : List[str] = () __UpperCamelCase : str = {} if is_torch_available() else {} __UpperCamelCase : Optional[Any] = False def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = EsmFoldModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @unittest.skip("""Does not support attention outputs""" ) def __magic_name__ (self ) -> Dict: """simple docstring""" pass @unittest.skip def __magic_name__ (self ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support passing input embeds!""" ) def __magic_name__ (self ) -> Tuple: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def __magic_name__ (self ) -> str: 
"""simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def __magic_name__ (self ) -> Any: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not output hidden states in the normal way.""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" pass @unittest.skip("""ESMfold does not output hidden states in the normal way.""" ) def __magic_name__ (self ) -> Any: """simple docstring""" pass @unittest.skip("""ESMFold only has one output format.""" ) def __magic_name__ (self ) -> Tuple: """simple docstring""" pass @unittest.skip("""This test doesn\'t work for ESMFold and doesn\'t test core functionality""" ) def __magic_name__ (self ) -> int: """simple docstring""" pass @unittest.skip("""ESMFold does not support input chunking.""" ) def __magic_name__ (self ) -> Any: """simple docstring""" pass @unittest.skip("""ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.""" ) def __magic_name__ (self ) -> int: """simple docstring""" pass @unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" ) def __magic_name__ (self ) -> List[str]: """simple docstring""" pass @unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""ESMFold doesn\'t support data parallel.""" ) def __magic_name__ (self ) -> Tuple: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" pass @require_torch class lowerCAmelCase_ (__UpperCAmelCase ): """simple docstring""" @slow def __magic_name__ (self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float() model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) SCREAMING_SNAKE_CASE__ : List[str] = model(__SCREAMING_SNAKE_CASE )["""positions"""] SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
25
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
__snake_case :Any = logging.get_logger(__name__)

__snake_case :Optional[Any] = {
    '''b0''': efficientnet.EfficientNetBa,
    '''b1''': efficientnet.EfficientNetBa,
    '''b2''': efficientnet.EfficientNetBa,
    '''b3''': efficientnet.EfficientNetBa,
    '''b4''': efficientnet.EfficientNetBa,
    '''b5''': efficientnet.EfficientNetBa,
    '''b6''': efficientnet.EfficientNetBa,
    '''b7''': efficientnet.EfficientNetBa,
}

__snake_case :List[Any] = {
    '''b0''': {
        '''hidden_dim''': 1280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.0,
        '''image_size''': 224,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [],
    },
    '''b1''': {
        '''hidden_dim''': 1280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.1,
        '''image_size''': 240,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [16],
    },
    '''b2''': {
        '''hidden_dim''': 1408,
        '''width_coef''': 1.1,
        '''depth_coef''': 1.2,
        '''image_size''': 260,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 8, 16],
    },
    '''b3''': {
        '''hidden_dim''': 1536,
        '''width_coef''': 1.2,
        '''depth_coef''': 1.4,
        '''image_size''': 300,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 18],
    },
    '''b4''': {
        '''hidden_dim''': 1792,
        '''width_coef''': 1.4,
        '''depth_coef''': 1.8,
        '''image_size''': 380,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [6],
    },
    '''b5''': {
        '''hidden_dim''': 2048,
        '''width_coef''': 1.6,
        '''depth_coef''': 2.2,
        '''image_size''': 456,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [13, 27],
    },
    '''b6''': {
        '''hidden_dim''': 2304,
        '''width_coef''': 1.8,
        '''depth_coef''': 2.6,
        '''image_size''': 528,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [31],
    },
    '''b7''': {
        '''hidden_dim''': 2560,
        '''width_coef''': 2.0,
        '''depth_coef''': 3.1,
        '''image_size''': 600,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [18],
    },
}


def __snake_case ( _UpperCAmelCase ):
    __a = EfficientNetConfig()
    __a = CONFIG_MAP[model_name]['''hidden_dim''']
    __a = CONFIG_MAP[model_name]['''width_coef''']
    __a = CONFIG_MAP[model_name]['''depth_coef''']
    __a = CONFIG_MAP[model_name]['''image_size''']
    __a = CONFIG_MAP[model_name]['''dropout_rate''']
    __a = CONFIG_MAP[model_name]['''dw_padding''']

    __a = '''huggingface/label-files'''
    __a = '''imagenet-1k-id2label.json'''
    __a = 1000
    __a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
    __a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}

    __a = idalabel
    __a = {v: k for k, v in idalabel.items()}
    return config


def __snake_case ( ):
    __a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    __a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
    return im


def __snake_case ( _UpperCAmelCase ):
    __a = CONFIG_MAP[model_name]['''image_size''']
    __a = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} ,
        image_mean=[0.4_85, 0.4_56, 0.4_06] ,
        image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] ,
        do_center_crop=_UpperCAmelCase ,
    )
    return preprocessor


def __snake_case ( _UpperCAmelCase ):
    __a = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    __a = sorted(set(_UpperCAmelCase ) )
    __a = len(_UpperCAmelCase )
    __a = {b: str(_UpperCAmelCase ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )}

    __a = []
    rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
    rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
    rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
    rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
    rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )

    for b in block_names:
        __a = block_name_mapping[b]
        rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
        rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
        rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
        rename_keys.append(
            (f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
        rename_keys.append(
            (f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
        rename_keys.append(
            (f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
        rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
        rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
        rename_keys.append(
            (f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
        rename_keys.append(
            (f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
        rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
        rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
        rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
        rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
        rename_keys.append(
            (f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
        rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
        rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
        rename_keys.append(
            (f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
        rename_keys.append(
            (f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )

    rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
    rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
    rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
    rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
    rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )

    __a = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            __a = '''efficientnet.''' + item[1]

    __a = '''classifier.weight'''
    __a = '''classifier.bias'''
    return key_mapping


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        __a = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            __a = torch.from_numpy(_UpperCAmelCase ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            __a = torch.from_numpy(_UpperCAmelCase ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            __a = torch.from_numpy(np.transpose(_UpperCAmelCase ) )
        else:
            __a = torch.from_numpy(_UpperCAmelCase )

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(_UpperCAmelCase )


@torch.no_grad()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    __a = model_classes[model_name](
        include_top=_UpperCAmelCase ,
        weights='''imagenet''' ,
        input_tensor=_UpperCAmelCase ,
        input_shape=_UpperCAmelCase ,
        pooling=_UpperCAmelCase ,
        classes=1000 ,
        classifier_activation='''softmax''' ,
    )

    __a = original_model.trainable_variables
    __a = original_model.non_trainable_variables
    __a = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        __a = param.numpy()
    __a = list(tf_params.keys() )

    # Load HuggingFace model
    __a = get_efficientnet_config(_UpperCAmelCase )
    __a = EfficientNetForImageClassification(_UpperCAmelCase ).eval()
    __a = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    __a = rename_keys(_UpperCAmelCase )
    replace_params(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    # Initialize preprocessor and preprocess input image
    __a = convert_image_processor(_UpperCAmelCase )
    __a = preprocessor(images=prepare_img() , return_tensors='''pt''' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        __a = hf_model(**_UpperCAmelCase )
    __a = outputs.logits.detach().numpy()

    # Original model inference
    __a = False
    __a = CONFIG_MAP[model_name]['''image_size''']
    __a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    __a = image.img_to_array(_UpperCAmelCase )
    __a = np.expand_dims(_UpperCAmelCase , axis=0 )
    __a = original_model.predict(_UpperCAmelCase )

    # Check whether original and HF model outputs match  -> np.allclose
    assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(_UpperCAmelCase ):
            os.mkdir(_UpperCAmelCase )
        # Save converted model and image processor
        hf_model.save_pretrained(_UpperCAmelCase )
        preprocessor.save_pretrained(_UpperCAmelCase )

    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...' )
        __a = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(_UpperCAmelCase )
        hf_model.push_to_hub(_UpperCAmelCase )


if __name__ == "__main__":
    __snake_case :int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''b0''',
        type=str,
        help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''hf_model''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')

    __snake_case :Optional[int] = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
49
0
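# The conversion scripts in this row validate a port by comparing logits from
# the original and converted models; a dependency-light sketch of that check
# (the array values below are invented for illustration):
import numpy as np

original_logits = np.array([0.12, -1.30, 2.70])
converted_logits = np.array([0.1201, -1.2999, 2.7001])
assert np.allclose(original_logits, converted_logits, atol=1e-3), "The predicted logits are not the same."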
'''simple docstring'''

import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)


def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : int ) ->Union[str, Any]:
    _SCREAMING_SNAKE_CASE = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append(
            (F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
            ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
            ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
            ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
            ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
            ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
        ] )

    return rename_keys


def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) ->Dict:
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        _SCREAMING_SNAKE_CASE = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )

        _SCREAMING_SNAKE_CASE = in_proj_weight[: encoder_config.hidden_size, :]
        _SCREAMING_SNAKE_CASE = in_proj_weight[encoder_config.hidden_size : encoder_config.hidden_size * 2, :]
        _SCREAMING_SNAKE_CASE = in_proj_weight[-encoder_config.hidden_size :, :]


def lowerCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ) ->str:
    _SCREAMING_SNAKE_CASE = dct.pop(_UpperCAmelCase )
    _SCREAMING_SNAKE_CASE = val


def lowerCamelCase ( __lowerCamelCase : Dict ) ->Union[str, Any]:
    if "handwritten" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    _SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("""RGB""" )
    return im


@torch.no_grad()
def lowerCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ) ->int:
    _SCREAMING_SNAKE_CASE = ViTConfig(image_size=384 , qkv_bias=_UpperCAmelCase )
    _SCREAMING_SNAKE_CASE = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        _SCREAMING_SNAKE_CASE = 1024
        _SCREAMING_SNAKE_CASE = 4096
        _SCREAMING_SNAKE_CASE = 24
        _SCREAMING_SNAKE_CASE = 16
        _SCREAMING_SNAKE_CASE = 1024
    else:
        raise ValueError("""Should either find \'base\' or \'large\' in checkpoint URL""" )

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = False
        _SCREAMING_SNAKE_CASE = """relu"""
        _SCREAMING_SNAKE_CASE = 1024
        _SCREAMING_SNAKE_CASE = True
        _SCREAMING_SNAKE_CASE = False
        _SCREAMING_SNAKE_CASE = False

    # load HuggingFace model
    _SCREAMING_SNAKE_CASE = ViTModel(_UpperCAmelCase , add_pooling_layer=_UpperCAmelCase )
    _SCREAMING_SNAKE_CASE = TrOCRForCausalLM(_UpperCAmelCase )
    _SCREAMING_SNAKE_CASE = VisionEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
    model.eval()

    # load state_dict of original model, rename some keys
    _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="""cpu""" , check_hash=_UpperCAmelCase )["""model"""]

    _SCREAMING_SNAKE_CASE = create_rename_keys(_UpperCAmelCase , _UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase )

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        _SCREAMING_SNAKE_CASE = state_dict.pop(_UpperCAmelCase )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            _SCREAMING_SNAKE_CASE = val
        else:
            _SCREAMING_SNAKE_CASE = val

    # load state dict
    model.load_state_dict(_UpperCAmelCase )

    # Check outputs on an image
    _SCREAMING_SNAKE_CASE = ViTImageProcessor(size=encoder_config.image_size )
    _SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained("""roberta-large""" )
    _SCREAMING_SNAKE_CASE = TrOCRProcessor(_UpperCAmelCase , _UpperCAmelCase )

    _SCREAMING_SNAKE_CASE = processor(images=prepare_img(_UpperCAmelCase ) , return_tensors="""pt""" ).pixel_values

    # verify logits
    _SCREAMING_SNAKE_CASE = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    _SCREAMING_SNAKE_CASE = model(pixel_values=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
    _SCREAMING_SNAKE_CASE = outputs.logits

    _SCREAMING_SNAKE_CASE = torch.Size([1, 1, 5_0265] )
    if "trocr-base-handwritten" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        _SCREAMING_SNAKE_CASE = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , _UpperCAmelCase , atol=1e-3 ), "First elements of logits not as expected"

    Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(_UpperCAmelCase )
    print(F'Saving processor to {pytorch_dump_folder_path}' )
    processor.save_pretrained(_UpperCAmelCase )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    lowercase_ = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
58
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

__snake_case :Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

__snake_case :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'{len(upper_files)} files contain uppercase characters:')
    print('''\n'''.join(upper_files) + '''\n''')

__snake_case :Tuple = [file for file in filepaths if ''' ''' in file]
if space_files:
    print(f'{len(space_files)} files contain space characters:')
    print('''\n'''.join(space_files) + '''\n''')

__snake_case :Optional[int] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
    print(f'{len(hyphen_files)} files contain hyphen characters:')
    print('''\n'''.join(hyphen_files) + '''\n''')

__snake_case :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'{len(nodir_files)} files are not in a directory:')
    print('''\n'''.join(nodir_files) + '''\n''')

__snake_case :int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
49
0
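# A compact, runnable restatement of the repository-hygiene checks in the row
# above (the sample paths are hypothetical):
import os

filepaths = ["maths/check_anagrams.py", "Maths/Bad-Name.py", "loose file.py"]
upper_files = [f for f in filepaths if f != f.lower()]
space_files = [f for f in filepaths if " " in f]
hyphen_files = [f for f in filepaths if "-" in f]
nodir_files = [f for f in filepaths if os.sep not in f]
print(len(upper_files + space_files + hyphen_files + nodir_files), "problem files found")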
'''simple docstring'''

from __future__ import annotations

from typing import Any


def _UpperCamelCase ( UpperCamelCase__ ):
    if not postfix_notation:
        return 0

    UpperCAmelCase__ : Optional[int] = {"""+""", """-""", """*""", """/"""}
    UpperCAmelCase__ : List[str] = []

    for token in postfix_notation:
        if token in operations:
            UpperCAmelCase__ , UpperCAmelCase__ : str = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(_UpperCAmelCase ) )

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
163
from collections import defaultdict


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
    __a = first_str.lower().strip()
    __a = second_str.lower().strip()

    # Remove whitespace
    __a = first_str.replace(''' ''' , '''''' )
    __a = second_str.replace(''' ''' , '''''' )

    # Strings of different lengths are not anagrams
    if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
        return False

    # Default values for count should be 0
    __a = defaultdict(_UpperCAmelCase )

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(_UpperCAmelCase ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values() )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    __snake_case :Any = input('''Enter the first string ''').strip()
    __snake_case :int = input('''Enter the second string ''').strip()

    __snake_case :int = check_anagrams(input_a, input_b)
    print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
49
0
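# A self-contained sketch of the postfix-evaluation idea from the first
# snippet above, with clean names chosen for illustration (the row's own code
# keeps the dataset's mangled identifiers):
def eval_postfix(tokens):
    stack = []
    for tok in tokens:
        if tok in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()
            if tok == "+":
                stack.append(a + b)
            elif tok == "-":
                stack.append(a - b)
            elif tok == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero, matching the
                # `a // b + 1` adjustment used above for mixed-sign operands
                stack.append(a // b + 1 if a * b < 0 and a % b != 0 else a // b)
        else:
            stack.append(int(tok))
    return stack.pop()


assert eval_postfix(["2", "1", "+", "3", "*"]) == 9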
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "x" , __lowerCAmelCase = 10**-10 , __lowerCAmelCase = 1 , )-> List[str]:
    """simple docstring"""
    _UpperCAmelCase = symbols(_UpperCAmelCase )
    _UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
    _UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
    _UpperCAmelCase = starting_point
    while True:
        if diff_function(_UpperCAmelCase ) != 0:
            _UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(_UpperCAmelCase )
        else:
            raise ZeroDivisionError('Could not find root' ) from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess

        _UpperCAmelCase = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')

    # Find root of polynomial
    # Find fourth Root of 5
    print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')

    # Find value of e
    print(
        '''The root of log(y) - 1 = 0 is ''',
        F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
    )

    # Exponential Roots
    print(
        '''The root of exp(x) - 1 = 0 is''',
        F'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''',
    )

    # Find root of cos(x)
    print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
39
import logging

from transformers.configuration_utils import PretrainedConfig


__snake_case :Any = logging.getLogger(__name__)


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : Optional[Any] = '''masked_bert'''

    def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
        '''simple docstring'''
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

        __a = vocab_size
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = hidden_act
        __a = intermediate_size
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = max_position_embeddings
        __a = type_vocab_size
        __a = initializer_range
        __a = layer_norm_eps
        __a = pruning_method
        __a = mask_init
        __a = mask_scale
49
0
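# The update rule driving the sympy-based root finder above is the
# multiplicity-aware Newton-Raphson step x_{n+1} = x_n - m * f(x_n) / f'(x_n).
# A dependency-free sketch for f(x) = x**2 - 5 (the function is illustrative):
def newton_sqrt5(x=2.0, tol=1e-10):
    while True:
        nxt = x - (x * x - 5) / (2 * x)  # f(x) = x^2 - 5, f'(x) = 2x, m = 1
        if abs(nxt - x) < tol:
            return nxt
        x = nxt


print(newton_sqrt5())  # ~2.2360679..., i.e. sqrt(5)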
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_ = '''focalnet''' def __init__(self , __a=2_24 , __a=4 , __a=3 , __a=96 , __a=False , __a=[1_92, 3_84, 7_68, 7_68] , __a=[2, 2, 6, 2] , __a=[2, 2, 2, 2] , __a=[3, 3, 3, 3] , __a="gelu" , __a=4.0 , __a=0.0 , __a=0.1 , __a=False , __a=1e-4 , __a=False , __a=False , __a=False , __a=0.02 , __a=1e-5 , __a=32 , __a=None , __a=None , **__a , ) -> Optional[Any]: super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = use_conv_embed UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = focal_levels UpperCamelCase = focal_windows UpperCamelCase = hidden_act UpperCamelCase = mlp_ratio UpperCamelCase = hidden_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = use_layerscale UpperCamelCase = layerscale_value UpperCamelCase = use_post_layernorm UpperCamelCase = use_post_layernorm_in_modulation UpperCamelCase = normalize_modulator UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = encoder_stride UpperCamelCase = ["stem"] + [F"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
153
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class _A :
    UpperCamelCase__ : Optional[Union[str, Path]] = None
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : Optional[Dict] = None
    UpperCamelCase__ : Optional[str] = None
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : bool = True
    UpperCamelCase__ : Optional[int] = None
    UpperCamelCase__ : int = 1
    UpperCamelCase__ : Optional[Union[str, bool]] = None
    UpperCamelCase__ : bool = False
    UpperCamelCase__ : Optional[Dict] = None
    UpperCamelCase__ : Optional[str] = None

    def _lowerCamelCase ( self : Union[str, Any]):
        '''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(__SCREAMING_SNAKE_CASE) for k, v in self.__dict__.items()})
49
0
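# The second snippet above is a dataclass-style config with a deep-copying
# clone helper; a minimal self-contained analogue of that pattern (all names
# here are invented for illustration):
import copy
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TinyConfig:
    cache_dir: Optional[str] = None
    use_auth_token: bool = False
    extras: dict = field(default_factory=dict)

    def clone(self):
        # Rebuild the instance from a deep copy of its own fields.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


cfg = TinyConfig(extras={"retries": 1})
assert cfg.clone() == cfg and cfg.clone() is not cfg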
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def __lowerCAmelCase ( a__ ) -> Optional[int]:
    __a = {}
    __a = tokenizer(example['''content'''] , truncation=_UpperCAmelCase )['''input_ids''']
    __a = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output


A : Any = HfArgumentParser(PretokenizationArguments)
A : Dict = parser.parse_args()
if args.num_workers is None:
    A : List[Any] = multiprocessing.cpu_count()
A : Dict = AutoTokenizer.from_pretrained(args.tokenizer_dir)

A : Optional[int] = time.time()
A : Any = load_dataset(args.dataset_name, split='train')
print(F"Dataset loaded in {time.time()-t_start:.2f}s")

A : Optional[Any] = time.time()
A : Tuple = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")

A : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
6
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__snake_case :Union[str, Any] = logging.get_logger(__name__)

__snake_case :Any = {
    '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}


class _A ( __UpperCAmelCase ):
    UpperCamelCase__ : Optional[int] = '''switch_transformers'''
    UpperCamelCase__ : Optional[Any] = ['''past_key_values''']
    UpperCamelCase__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=32_128 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.01 , __SCREAMING_SNAKE_CASE : Dict="float32" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : int=128 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=1E-6 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , __SCREAMING_SNAKE_CASE : List[str]=0.0_01 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[int]="relu" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , **__SCREAMING_SNAKE_CASE : Dict , ):
        '''simple docstring'''
        __a = vocab_size
        __a = d_model
        __a = d_kv
        __a = d_ff

        __a = num_sparse_encoder_layers

        __a = num_layers
        __a = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        __a = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            __a = self.num_layers // self.num_sparse_encoder_layers
        else:
            __a = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            __a = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            __a = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        __a = num_heads
        __a = num_experts
        __a = expert_capacity
        __a = router_bias
        __a = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
        __a = router_dtype

        __a = router_ignore_padding_tokens
        __a = relative_attention_num_buckets
        __a = relative_attention_max_distance

        __a = dropout_rate
        __a = layer_norm_epsilon
        __a = initializer_factor
        __a = feed_forward_proj
        __a = use_cache
        __a = add_router_probs

        __a = router_z_loss_coef
        __a = router_aux_loss_coef

        __a = self.feed_forward_proj.split('''-''')
        __a = act_info[-1]
        __a = act_info[0] == '''gated'''

        if len(__SCREAMING_SNAKE_CASE) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE) > 2:
            raise ValueError(
                F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''')

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            __a = '''gelu_new'''

        super().__init__(
            pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
49
0
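# The pretokenization script above leans on `datasets.Dataset.map`; a minimal
# sketch of the same pattern on an in-memory dataset (requires the `datasets`
# package; the column name "content" matches the script above):
from datasets import Dataset

ds = Dataset.from_dict({"content": ["def f(): pass", "print('hi')"]})
ds = ds.map(lambda example: {"n_chars": len(example["content"])})
print(ds[0])  # {'content': 'def f(): pass', 'n_chars': 13}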
from __future__ import annotations

from collections import Counter
from random import random


class snake_case__ :
    """simple docstring"""

    def __init__( self ) -> Tuple:
        """simple docstring"""
        a__ : Optional[Any] = {}

    def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Optional[int]:
        """simple docstring"""
        a__ : str = {}

    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> str:
        """simple docstring"""
        if nodea not in self.connections:
            self.add_node(__SCREAMING_SNAKE_CASE )
        if nodea not in self.connections:
            self.add_node(__SCREAMING_SNAKE_CASE )
        a__ : List[str] = probability

    def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
        """simple docstring"""
        return list(self.connections )

    def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
        """simple docstring"""
        a__ : Any = 0
        a__ : Optional[Any] = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def lowerCAmelCase_ ( _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : List[str]) -> List[str]:
    """simple docstring"""
    a__ : int = MarkovChainGraphUndirectedUnweighted()

    for nodea, nodea, probability in transitions:
        graph.add_transition_probability(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)

    a__ : Optional[Any] = Counter(graph.get_nodes())
    a__ : List[Any] = start

    for _ in range(_UpperCAmelCase):
        a__ : str = graph.transition(_UpperCAmelCase)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
170
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


__snake_case :List[Any] = logging.getLogger(__name__)


class _A :
    def __init__( self : List[str]):
        '''simple docstring'''
        __a = False

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
        '''simple docstring'''
        if not self.initialized:
            __a = RagRetriever(
                __SCREAMING_SNAKE_CASE ,
                question_encoder_tokenizer=__SCREAMING_SNAKE_CASE ,
                generator_tokenizer=__SCREAMING_SNAKE_CASE ,
                index=__SCREAMING_SNAKE_CASE ,
                init_retrieval=__SCREAMING_SNAKE_CASE ,
            )
            __a = True

    def _lowerCamelCase ( self : List[str]):
        '''simple docstring'''
        self.retriever.index.init_index()

    def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
        '''simple docstring'''
        __a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        return doc_ids, retrieved_doc_embeds


class _A ( __UpperCAmelCase ):
    def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
        super().__init__(
            __SCREAMING_SNAKE_CASE ,
            question_encoder_tokenizer=__SCREAMING_SNAKE_CASE ,
            generator_tokenizer=__SCREAMING_SNAKE_CASE ,
            index=__SCREAMING_SNAKE_CASE ,
            init_retrieval=__SCREAMING_SNAKE_CASE ,
        )
        __a = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
                    for worker in self.retrieval_workers
                ])

    def _lowerCamelCase ( self : List[Any]):
        '''simple docstring'''
        logger.info('''initializing retrieval''')

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            __a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
            __a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
        else:
            __a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE)

    @classmethod
    def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
        '''simple docstring'''
        return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    @classmethod
    def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]):
        '''simple docstring'''
        __a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
        __a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
        __a = rag_tokenizer.question_encoder
        __a = rag_tokenizer.generator
        if indexed_dataset is not None:
            __a = '''custom'''
            __a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE)
        else:
            __a = cls._build_index(__SCREAMING_SNAKE_CASE)
        return cls(
            __SCREAMING_SNAKE_CASE ,
            question_encoder_tokenizer=__SCREAMING_SNAKE_CASE ,
            generator_tokenizer=__SCREAMING_SNAKE_CASE ,
            retrieval_workers=__SCREAMING_SNAKE_CASE ,
            index=__SCREAMING_SNAKE_CASE ,
        )
49
0
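# A dependency-free sketch of the random-walk counting idea in the Markov
# chain snippet above (the toy graph and step count are made up):
import random
from collections import Counter

transitions = {"a": [("a", 0.9), ("b", 0.1)], "b": [("a", 0.5), ("b", 0.5)]}


def step(node):
    r, acc = random.random(), 0.0
    for dest, prob in transitions[node]:
        acc += prob
        if acc > r:
            return dest
    return node


visited, node = Counter(), "a"
for _ in range(1000):
    node = step(node)
    visited[node] += 1
print(visited)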
'''simple docstring'''

import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class _lowercase ( unittest.TestCase ):
    '''simple docstring'''

    def a ( self : Tuple ) -> Tuple:
        __lowerCAmelCase = torch.nn.Linear(10 , 10 )
        __lowerCAmelCase = torch.optim.SGD(model.parameters() , 0.1 )
        __lowerCAmelCase = Accelerator()
        __lowerCAmelCase = accelerator.prepare(__SCREAMING_SNAKE_CASE )
        try:
            pickle.loads(pickle.dumps(__SCREAMING_SNAKE_CASE ) )
        except Exception as e:
            self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
        AcceleratorState._reset_state()
229
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # Initialise PyTorch model
    __a = BigBirdConfig.from_json_file(_UpperCAmelCase )
    print(f'Building PyTorch model from configuration: {config}' )

    if is_trivia_qa:
        __a = BigBirdForQuestionAnswering(_UpperCAmelCase )
    else:
        __a = BigBirdForPreTraining(_UpperCAmelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(_UpperCAmelCase , _UpperCAmelCase , is_trivia_qa=_UpperCAmelCase )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(_UpperCAmelCase )


if __name__ == "__main__":
    __snake_case :Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--big_bird_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
    )
    __snake_case :Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
49
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {'''vocab_file''': '''spiece.model'''} _SCREAMING_SNAKE_CASE = { '''vocab_file''': { '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''', } } class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase ): def __init__( self : Optional[int] , _A : List[Any] , _A : int=False , _A : Optional[Any]=True , _A : Union[str, Any]=False , _A : Dict="<s>" , _A : int="</s>" , _A : Union[str, Any]="<unk>" , _A : Optional[Any]="<sep>" , _A : str="<pad>" , _A : Tuple="<cls>" , _A : int="<mask>" , _A : Dict=["<eop>", "<eod>"] , _A : Optional[Dict[str, Any]] = None , **_A : str , ) -> Optional[int]: """simple docstring""" snake_case_ : str = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token snake_case_ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) snake_case_ : Any = 3 snake_case_ : int = do_lower_case snake_case_ : str = remove_space snake_case_ : Optional[int] = keep_accents snake_case_ : str = vocab_file snake_case_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( 'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ' 'See https://pypi.org/project/jieba/ for installation.' 
) snake_case_ : str = jieba snake_case_ : Tuple = str.maketrans(' \n' , '\u2582\u2583' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return len(self.sp_model ) def UpperCAmelCase_ ( self : str ) -> Tuple: """simple docstring""" snake_case_ : Dict = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Optional[int]: """simple docstring""" snake_case_ : Union[str, Any] = self.__dict__.copy() snake_case_ : Any = None return state def __setstate__( self : Union[str, Any] , _A : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : List[str] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case_ : int = {} snake_case_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self : Optional[Any] , _A : List[Any] ) -> str: """simple docstring""" if self.remove_space: snake_case_ : Dict = ' '.join(inputs.strip().split() ) else: snake_case_ : Any = inputs snake_case_ : Tuple = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: snake_case_ : Dict = unicodedata.normalize('NFKD' , __SCREAMING_SNAKE_CASE ) snake_case_ : Optional[int] = ''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] ) if self.do_lower_case: snake_case_ : List[Any] = outputs.lower() return outputs def UpperCAmelCase_ ( self : Dict , _A : str ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = self.preprocess_text(__SCREAMING_SNAKE_CASE ) snake_case_ : int = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = [] for piece in pieces: if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): snake_case_ : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: snake_case_ : Tuple = cur_pieces[1:] else: snake_case_ : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__SCREAMING_SNAKE_CASE ) else: new_pieces.append(__SCREAMING_SNAKE_CASE ) return new_pieces def UpperCAmelCase_ ( self : Optional[Any] , _A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : Union[str, Any] ) -> Dict: """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase_ ( self : str , _A : List[str] ) -> str: """simple docstring""" snake_case_ : Union[str, Any] = ''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ' ' ).strip() return out_string def UpperCAmelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ) -> int: """simple docstring""" snake_case_ : Any = [self.sep_token_id] snake_case_ : str = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCAmelCase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> str: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , 
already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] def UpperCAmelCase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ) -> Tuple: """simple docstring""" snake_case_ : Tuple = [self.sep_token_id] snake_case_ : Union[str, Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def UpperCAmelCase_ ( self : Optional[Any] , _A : str , _A : Optional[str] = None ) -> int: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ : Union[str, Any] = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , 'wb' ) as fi: snake_case_ : int = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def UpperCAmelCase_ ( self : Any , *_A : int , **_A : List[Any] ) -> Any: """simple docstring""" snake_case_ : Any = super()._decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) snake_case_ : Optional[int] = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' ) return text
327
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences (or pairs) to a fixed length."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
49
0
"""simple docstring""" import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = [] if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for v in tree.values(): shapes.extend(_fetch_dims(_UpperCAmelCase ) ) elif isinstance(_UpperCAmelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(_UpperCAmelCase ) ) elif isinstance(_UpperCAmelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = [] for d in reversed(_UpperCAmelCase ): idx.append(flat_idx % d ) _UpperCAmelCase = flat_idx // d return tuple(reversed(_UpperCAmelCase ) ) @torch.jit.ignore def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str = None , _SCREAMING_SNAKE_CASE : Optional[Any] = None , ): '''simple docstring''' def reduce_edge_list(_SCREAMING_SNAKE_CASE : Any ) -> None: _UpperCAmelCase = True for i in range(len(_UpperCAmelCase ) ): _UpperCAmelCase = -1 * (i + 1) l[reversed_idx] &= tally _UpperCAmelCase = l[reversed_idx] if start_edges is None: _UpperCAmelCase = [s == 0 for s in start] reduce_edge_list(_UpperCAmelCase ) if end_edges is None: _UpperCAmelCase = [e == (d - 1) for e, d in zip(_UpperCAmelCase , _UpperCAmelCase )] reduce_edge_list(_UpperCAmelCase ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(_UpperCAmelCase ) == 0: return [()] elif len(_UpperCAmelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] _UpperCAmelCase = [] _UpperCAmelCase = [] # Dimensions common to start and end can be selected directly for s, e in zip(_UpperCAmelCase , _UpperCAmelCase ): if s == e: path_list.append(slice(_UpperCAmelCase , s + 1 ) ) else: break _UpperCAmelCase = tuple(_UpperCAmelCase ) _UpperCAmelCase = len(_UpperCAmelCase ) # start == end, and we're done if divergence_idx == len(_UpperCAmelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _UpperCAmelCase = start[divergence_idx] return tuple( path + (slice(_UpperCAmelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None _UpperCAmelCase = end[divergence_idx] return tuple( path + (slice(_UpperCAmelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we 
can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) _UpperCAmelCase = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = t.shape[:no_batch_dims] _UpperCAmelCase = list(_flat_idx_to_idx(_UpperCAmelCase , _UpperCAmelCase ) ) # _get_minimal_slice_set is inclusive _UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , _UpperCAmelCase ) ) # Get an ordered list of slices to perform _UpperCAmelCase = _get_minimal_slice_set( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) _UpperCAmelCase = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict = False , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Dict = False , ): '''simple docstring''' if not (len(_UpperCAmelCase ) > 0): raise ValueError('''Must provide at least one input''' ) _UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(_UpperCAmelCase )] _UpperCAmelCase = tuple([max(_UpperCAmelCase ) for s in zip(*_UpperCAmelCase )] ) def _prep_inputs(_SCREAMING_SNAKE_CASE : Any ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: _UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) _UpperCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: _UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t _UpperCAmelCase = tensor_tree_map(_prep_inputs , _UpperCAmelCase ) _UpperCAmelCase = None if _out is not None: _UpperCAmelCase = tensor_tree_map(lambda _SCREAMING_SNAKE_CASE : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) _UpperCAmelCase = 1 for d in orig_batch_dims: flat_batch_dim *= d _UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(_SCREAMING_SNAKE_CASE : int ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t _UpperCAmelCase = 0 _UpperCAmelCase = prepped_outputs for _ in range(_UpperCAmelCase ): # Chunk the input if not low_mem: _UpperCAmelCase = _select_chunk else: _UpperCAmelCase = partial( _chunk_slice , flat_start=_UpperCAmelCase , flat_end=min(_UpperCAmelCase , i + chunk_size ) , no_batch_dims=len(_UpperCAmelCase ) , ) _UpperCAmelCase = tensor_tree_map(_UpperCAmelCase , _UpperCAmelCase ) # Run the layer on the chunk _UpperCAmelCase = layer(**_UpperCAmelCase ) # Allocate space for the output if out is None: _UpperCAmelCase = tensor_tree_map(lambda _SCREAMING_SNAKE_CASE : 
t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _UpperCAmelCase ) # Put the chunk in its pre-allocated space if isinstance(_UpperCAmelCase , _UpperCAmelCase ): def assign(_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> None: for k, v in da.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): assign(_UpperCAmelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: _UpperCAmelCase = da[k] assign(_UpperCAmelCase , _UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): for xa, xa in zip(_UpperCAmelCase , _UpperCAmelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: _UpperCAmelCase = xa elif isinstance(_UpperCAmelCase , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: _UpperCAmelCase = output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size _UpperCAmelCase = tensor_tree_map(lambda _SCREAMING_SNAKE_CASE : t.view(orig_batch_dims + t.shape[1:] ) , _UpperCAmelCase ) return out class _a : """simple docstring""" def __init__( self : List[str] , __UpperCamelCase : int = 5_1_2 , )->Optional[Any]: _UpperCAmelCase = max_chunk_size _UpperCAmelCase = None _UpperCAmelCase = None def lowercase__ ( self : Tuple , __UpperCamelCase : Callable , __UpperCamelCase : tuple , __UpperCamelCase : int )->Dict: logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size _UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] _UpperCAmelCase = [c for c in candidates if c > min_chunk_size] _UpperCAmelCase = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__UpperCamelCase : int ) -> bool: try: with torch.no_grad(): fn(*__SCREAMING_SNAKE_CASE , chunk_size=__SCREAMING_SNAKE_CASE ) return True except RuntimeError: return False _UpperCAmelCase = 0 _UpperCAmelCase = len(__SCREAMING_SNAKE_CASE ) - 1 while i > min_viable_chunk_size_index: _UpperCAmelCase = test_chunk_size(candidates[i] ) if not viable: _UpperCAmelCase = (min_viable_chunk_size_index + i) // 2 else: _UpperCAmelCase = i _UpperCAmelCase = (i + len(__SCREAMING_SNAKE_CASE ) - 1) // 2 return candidates[min_viable_chunk_size_index] def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Iterable , __UpperCamelCase : Iterable )->int: _UpperCAmelCase = True for aa, aa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert type(__SCREAMING_SNAKE_CASE ) == type(__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda __UpperCamelCase : x[0] )] _UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda __UpperCamelCase : x[0] )] consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: consistent &= aa == aa return consistent def lowercase__ ( self : List[Any] , __UpperCamelCase : Callable , __UpperCamelCase : tuple , __UpperCamelCase : int , )->List[Any]: _UpperCAmelCase = True _UpperCAmelCase = tree_map(lambda __UpperCamelCase : a.shape if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) else a , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
self._compare_arg_caches(self.cached_arg_data , __SCREAMING_SNAKE_CASE ) else: # Otherwise, we can reuse the precomputed value _UpperCAmelCase = False if not consistent: _UpperCAmelCase = self._determine_favorable_chunk_size( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
260
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Build an undirected adjacency list: each edge is added in both directions.
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # Each expected MST edge must appear in the result in either direction.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
49
0
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
184
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _A ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 
'unet/diffusion_pytorch_model.fp16.safetensors', ] __a = '''fp16''' self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] __a = '''fp16''' self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __a = '''fp16''' self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
49
0
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
25
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __snake_case :Dict = '''bart''' __snake_case :Tuple = True @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __a = qar_model.eval() else: __a , __a = (None, None) if MODEL_TYPE == "bart": __a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __a = sas_model.eval() else: __a , __a = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): if LOAD_DENSE_INDEX: __a = faiss.StandardGpuResources() __a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __a = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __a = faiss.IndexFlatIP(128 ) __a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase ) wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU else: __a , __a = (None, None) __a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_UpperCAmelCase ) def __snake_case ( ): __a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __a = elia['''train_eli5'''] __a = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __a = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_UpperCAmelCase ) return (elia_train, eli5_train_q_index) __snake_case ,__snake_case ,__snake_case :List[str] = load_indexes() __snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models() __snake_case ,__snake_case :Tuple = load_train_data() def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ): __a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase ) __a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase ) __a = [elia_train[int(_UpperCAmelCase )] for i in I[0]] return nn_examples def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ): if source == "none": __a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __a , __a = query_qa_dense_index( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: __a , __a = query_es_index( _UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , ) __a = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __a = '''question: {} context: 
{}'''.format(_UpperCAmelCase , _UpperCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None), } ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ): with torch.no_grad(): __a = qa_sas_generate( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('''Long Form Question Answering with ELI5''') # Start sidebar __snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>''' __snake_case :int = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __snake_case :int = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. ''' st.sidebar.markdown(description, unsafe_allow_html=True) __snake_case :Union[str, Any] = [ '''Answer the question''', '''View the retrieved document only''', '''View the most similar ELI5 question and answer''', '''Show me everything, please!''', ] __snake_case :int = st.sidebar.checkbox('''Demo options''') if demo_options: __snake_case :str = st.sidebar.selectbox( '''''', action_list, index=3, ) __snake_case :Tuple = action_list.index(action_st) __snake_case :Optional[int] = st.sidebar.selectbox( '''''', ['''Show full text of passages''', '''Show passage section titles'''], index=0, ) __snake_case :Dict = show_type == '''Show full text of passages''' else: __snake_case :Dict = 3 __snake_case :str = True __snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''') if retrieval_options: __snake_case :List[str] = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
''' st.sidebar.markdown(retriever_info) __snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none''']) __snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed''']) else: __snake_case :Optional[int] = '''wiki40b''' __snake_case :Dict = '''dense''' __snake_case :Dict = '''beam''' __snake_case :int = 2 __snake_case :str = 64 __snake_case :Tuple = 256 __snake_case :int = None __snake_case :List[Any] = None __snake_case :int = st.sidebar.checkbox('''Generation options''') if generate_options: __snake_case :Tuple = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. ''' st.sidebar.markdown(generate_info) __snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled''']) __snake_case :Dict = st.sidebar.slider( '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __snake_case :Dict = st.sidebar.slider( '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __snake_case :Tuple = st.sidebar.slider( '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) __snake_case :Any = st.sidebar.slider( '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) __snake_case :Any = None # start main text __snake_case :Dict = [ '''<MY QUESTION>''', '''How do people make chocolate?''', '''Why do we get a fever when we are sick?''', '''How can different animals perceive different colors?''', '''What is natural language processing?''', '''What\'s the best way to treat a sunburn?''', '''What exactly are vitamins ?''', '''How does nuclear energy provide electricity?''', '''What\'s the difference between viruses and bacteria?''', '''Why are flutes classified as woodwinds when most of them are made out of metal ?''', '''Why do people like drinking coffee even though it tastes so bad?''', '''What happens when wine ages? How does it make the wine taste better?''', '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''', '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''', '''How does New Zealand have so many large bird predators?''', ] __snake_case :int = st.selectbox( '''What would you like to ask? 
---- select <MY QUESTION> to enter a new query''', questions_list, index=1, ) if question_s == "<MY QUESTION>": __snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''') else: __snake_case :Optional[int] = question_s if st.button('''Show me!'''): if action in [0, 1, 3]: if index_type == "mixed": __snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10) __snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10) __snake_case :Optional[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __snake_case :Union[str, Any] = support_list[:10] __snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list]) else: __snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __snake_case ,__snake_case :Optional[int] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == '''sampled'''), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('''### The model generated answer is:''') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''') for i, res in enumerate(support_list): __snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_''')) __snake_case :int = res[1].strip() if sec_titles == "": __snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url) else: __snake_case :Optional[int] = sec_titles.split(''' & ''') __snake_case :str = ''' & '''.join( ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list] ) st.markdown( '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True ) if action in [2, 3]: __snake_case :str = find_nearest_training(question) __snake_case :str = nn_train_list[0] st.markdown( '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title''']) ) __snake_case :Optional[Any] = [ '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != ''''''])) for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score'''])) if i == 0 or sc > 2 ] st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st))) __snake_case :Tuple = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* ''' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
49
0
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class a_ ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = XLMProphetNetTokenizer UpperCamelCase = False UpperCamelCase = True def snake_case_( self ) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing _SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = """[PAD]""" _SCREAMING_SNAKE_CASE = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """[PAD]""" ) self.assertEqual(vocab_keys[1] , """[CLS]""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1012 ) def snake_case_( self ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """[UNK]""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""", ] , ) @cached_property def snake_case_( self ) -> int: return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" ) @slow def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = """Hello World!""" 
_SCREAMING_SNAKE_CASE = [3_5389, 6672, 49, 2] self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
58
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _A ( __UpperCAmelCase ): def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = eval_examples __a = post_process_function def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = gen_kwargs.copy() __a = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) __a = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) __a = gen_kwargs __a = self.eval_dataset if eval_dataset is None else eval_dataset __a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE) __a = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) else: __a = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report()) __a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE) return metrics def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = gen_kwargs.copy() __a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE) # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''') __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
49
0
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms whose values do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
163
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish notation) expression of integers.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncated toward zero, matching C-style "/".
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
49
0
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _a = False _a = logging.get_logger(__name__) _a = '''ybelkada/fonts''' def __A ( )-> str: """simple docstring""" if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ 'Pix2StructImageProcessor. Please upgrade torch.' ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Any: """simple docstring""" requires_backends(_UpperCAmelCase , ['torch'] ) _check_torch_version() _UpperCAmelCase = image_tensor.unsqueeze(0 ) _UpperCAmelCase = torch.nn.functional.unfold(_UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) _UpperCAmelCase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , _UpperCAmelCase , _UpperCAmelCase , -1 ) _UpperCAmelCase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def __A ( __lowerCAmelCase , __lowerCAmelCase = 36 , __lowerCAmelCase = "black" , __lowerCAmelCase = "white" , __lowerCAmelCase = 5 , __lowerCAmelCase = 5 , __lowerCAmelCase = 5 , __lowerCAmelCase = 5 , __lowerCAmelCase = None , __lowerCAmelCase = None , )-> Union[str, Any]: """simple docstring""" requires_backends(_UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. _UpperCAmelCase = textwrap.TextWrapper(width=80 ) _UpperCAmelCase = wrapper.wrap(text=_UpperCAmelCase ) _UpperCAmelCase = '\n'.join(_UpperCAmelCase ) if font_bytes is not None and font_path is None: _UpperCAmelCase = io.BytesIO(_UpperCAmelCase ) elif font_path is not None: _UpperCAmelCase = font_path else: _UpperCAmelCase = hf_hub_download(_UpperCAmelCase , 'Arial.TTF' ) _UpperCAmelCase = ImageFont.truetype(_UpperCAmelCase , encoding='UTF-8' , size=_UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. _UpperCAmelCase = ImageDraw.Draw(Image.new('RGB' , (1, 1) , _UpperCAmelCase ) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = temp_draw.textbbox((0, 0) , _UpperCAmelCase , _UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
_UpperCAmelCase = text_width + left_padding + right_padding _UpperCAmelCase = text_height + top_padding + bottom_padding _UpperCAmelCase = Image.new('RGB' , (image_width, image_height) , _UpperCAmelCase ) _UpperCAmelCase = ImageDraw.Draw(_UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=_UpperCAmelCase , fill=_UpperCAmelCase , font=_UpperCAmelCase ) return image def __A ( __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )-> List[str]: """simple docstring""" requires_backends(_UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary _UpperCAmelCase = to_pil_image(_UpperCAmelCase ) _UpperCAmelCase = render_text(_UpperCAmelCase , **_UpperCAmelCase ) _UpperCAmelCase = max(header_image.width , image.width ) _UpperCAmelCase = int(image.height * (new_width / image.width) ) _UpperCAmelCase = int(header_image.height * (new_width / header_image.width) ) _UpperCAmelCase = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary _UpperCAmelCase = to_numpy_array(_UpperCAmelCase ) if infer_channel_dimension_format(_UpperCAmelCase ) == ChannelDimension.LAST: _UpperCAmelCase = to_channel_dimension_format(_UpperCAmelCase , ChannelDimension.LAST ) return new_image class __lowerCamelCase ( __UpperCAmelCase): """simple docstring""" UpperCamelCase__ = ['''flattened_patches'''] def __init__( self , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = False , **UpperCAmelCase , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = patch_size if patch_size is not None else {'height': 16, 'width': 16} _UpperCAmelCase = do_normalize _UpperCAmelCase = do_convert_rgb _UpperCAmelCase = max_patches _UpperCAmelCase = is_vqa def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ): """simple docstring""" requires_backends(self.extract_flattened_patches , 'torch' ) _check_torch_version() # convert to torch _UpperCAmelCase = to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.FIRST ) _UpperCAmelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = patch_size['height'], patch_size['width'] _UpperCAmelCase , _UpperCAmelCase = get_image_size(__SCREAMING_SNAKE_CASE ) # maximize scale s.t. 
_UpperCAmelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) _UpperCAmelCase = max(min(math.floor(scale * image_height / patch_height ) , __SCREAMING_SNAKE_CASE ) , 1 ) _UpperCAmelCase = max(min(math.floor(scale * image_width / patch_width ) , __SCREAMING_SNAKE_CASE ) , 1 ) _UpperCAmelCase = max(num_feasible_rows * patch_height , 1 ) _UpperCAmelCase = max(num_feasible_cols * patch_width , 1 ) _UpperCAmelCase = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=__SCREAMING_SNAKE_CASE , antialias=__SCREAMING_SNAKE_CASE , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] _UpperCAmelCase = torch_extract_patches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCAmelCase = patches.shape _UpperCAmelCase = patches_shape[1] _UpperCAmelCase = patches_shape[2] _UpperCAmelCase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] _UpperCAmelCase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] _UpperCAmelCase = torch.arange(__SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , __SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] ) _UpperCAmelCase = torch.arange(__SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(__SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] _UpperCAmelCase = row_ids.to(torch.floataa ) _UpperCAmelCase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] _UpperCAmelCase = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] _UpperCAmelCase = torch.nn.functional.pad(__SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float() _UpperCAmelCase = to_numpy_array(__SCREAMING_SNAKE_CASE ) return result def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ): """simple docstring""" if image.dtype == np.uinta: _UpperCAmelCase = image.astype(np.floataa ) # take mean across the whole `image` _UpperCAmelCase = np.mean(__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.std(__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = max(__SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ): """simple docstring""" _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCAmelCase = patch_size if patch_size is not None else self.patch_size _UpperCAmelCase = max_patches if max_patches is not None else self.max_patches _UpperCAmelCase = self.is_vqa if kwargs.get('data_format' , __SCREAMING_SNAKE_CASE ) is not None: raise ValueError('data_format is not an accepted input as the outputs are ' ) _UpperCAmelCase = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCAmelCase = [convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images] # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.' ) _UpperCAmelCase = kwargs.pop('font_bytes' , __SCREAMING_SNAKE_CASE ) _UpperCAmelCase = kwargs.pop('font_path' , __SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = [header_text] * len(__SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [ render_header(__SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=__SCREAMING_SNAKE_CASE , font_path=__SCREAMING_SNAKE_CASE ) for i, image in enumerate(__SCREAMING_SNAKE_CASE ) ] if do_normalize: _UpperCAmelCase = [self.normalize(image=__SCREAMING_SNAKE_CASE ) for image in images] # convert to torch tensor and permute _UpperCAmelCase = [ self.extract_flattened_patches(image=__SCREAMING_SNAKE_CASE , max_patches=__SCREAMING_SNAKE_CASE , patch_size=__SCREAMING_SNAKE_CASE ) for image in images ] # create attention mask in numpy _UpperCAmelCase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] _UpperCAmelCase = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_outputs
39
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __snake_case :Optional[int] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __snake_case :List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __snake_case :List[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = random.randint(0 , len(_UpperCAmelCase ) - 1 ) __a = parent_a[:random_slice] + parent_a[random_slice:] __a = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __a = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): __a = [] # Generate more children proportionally to the fitness score. __a = int(parent_a[1] * 100 ) + 1 __a = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): __a = population_score[random.randint(0 , _UpperCAmelCase )][0] __a , __a = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: __a = f'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. __a = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __a = f'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. __a = [] for _ in range(_UpperCAmelCase ): population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. __a , __a = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population] # Check if there is a matching evolution. 
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'\nGeneration: {generation}' f'\nTotal Population:{total_population}' f'\nBest score: {population_score[0][1]}' f'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __a = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCAmelCase ) # Normalize population score to be between 0 and 1. __a = [ (item, score / len(_UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCAmelCase ): population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": __snake_case :Optional[int] = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __snake_case :List[Any] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list) print( f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}' )
49
0
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCamelCase ( __UpperCAmelCase , unittest.TestCase ): UpperCAmelCase_ = LEDTokenizer UpperCAmelCase_ = LEDTokenizerFast UpperCAmelCase_ = True def snake_case_ (self ) -> Optional[Any]: super().setUp() UpperCamelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCamelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCamelCase = {"unk_token": "<unk>"} UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__SCREAMING_SNAKE_CASE ) ) def snake_case_ (self , **__a ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def snake_case_ (self , **__a ) -> Dict: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def snake_case_ (self , __a ) -> Optional[Any]: return "lower newer", "lower newer" @cached_property def snake_case_ (self ) -> Tuple: return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def snake_case_ (self ) -> Optional[Any]: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] UpperCamelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCamelCase = tokenizer(__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , return_tensors="pt" ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @require_torch def snake_case_ (self ) -> List[str]: UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCamelCase = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="pt" ) self.assertIn("input_ids" , __SCREAMING_SNAKE_CASE ) self.assertIn("attention_mask" , __SCREAMING_SNAKE_CASE ) self.assertNotIn("labels" , __SCREAMING_SNAKE_CASE ) self.assertNotIn("decoder_attention_mask" , __SCREAMING_SNAKE_CASE ) @require_torch def snake_case_ (self ) -> Tuple: UpperCamelCase = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, 
self.default_tokenizer_fast]: UpperCamelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def snake_case_ (self ) -> Optional[int]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCamelCase = tokenizer( ["I am a small frog" * 10_24, "I am a small frog"] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors="pt" ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = ["A long paragraph for summarization."] UpperCamelCase = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCamelCase = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt" ) UpperCamelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE , return_tensors="pt" ) UpperCamelCase = inputs["input_ids"] UpperCamelCase = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def snake_case_ (self ) -> int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCamelCase = ["Summary of the text.", "Another summary."] UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] UpperCamelCase = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE ) UpperCamelCase = [[0] * len(__SCREAMING_SNAKE_CASE ) for x in encoded_output["input_ids"]] UpperCamelCase = tokenizer.pad(__SCREAMING_SNAKE_CASE ) self.assertSequenceEqual(outputs["global_attention_mask"] , __SCREAMING_SNAKE_CASE ) def snake_case_ (self ) -> Optional[Any]: pass def snake_case_ (self ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase = "A, <mask> AllenNLP sentence." 
UpperCamelCase = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE ) UpperCamelCase = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
153
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
49
0
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps one placeholder token to several embedding vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # Handle cases where a new placeholder token contains an existing placeholder token but is larger.
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
6
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ): __a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __a = f'{olid} is not a valid Open Library olid' raise ValueError(_UpperCAmelCase ) return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json() def __snake_case ( _UpperCAmelCase ): __a = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __a = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __a = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = ''', '''.join(_UpperCAmelCase ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: __snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.') continue print(f'\nSearching Open Library for ISBN: {isbn}...\n') try: __snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}')) print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'Sorry, there are no results for ISBN: {isbn}.')
49
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
170
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
49
0
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum value in nums within the index range [left, right]
    using divide and conquer.

    >>> find_max([3, 2, 1], 0, 2)
    3
    >>> find_max([-3, -2, -1], 0, 2)
    -1
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
229
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case :List[str] = '''\ Text data. Second line of data.''' __snake_case :Optional[Any] = '''file''' @pytest.fixture(scope='''session''' ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') __a = bytes(_UpperCAmelCase , '''utf-8''' ) with zstd.open(_UpperCAmelCase , '''wb''' ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def __snake_case ( _UpperCAmelCase ): with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} __a = input_paths[compression_format] __a = tmp_path / '''cache''' __a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: __a = f.read() with open(_UpperCAmelCase ) as f: __a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = '''custom_cache''' __a = '''custom_extracted_dir''' __a = tmp_path / '''custom_extracted_path''' if default_extracted: __a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) ) __a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __a = xz_file __a = ( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase ) ) __a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path __a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def __snake_case ( _UpperCAmelCase ): # absolute path __a = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path __a = '''./__missing_file__.txt''' with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = get_from_cache(f'tmp://{tmpfs_file}' ) with open(_UpperCAmelCase ) as f: __a = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( ): with pytest.raises(_UpperCAmelCase ): 
cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase ) def __snake_case ( _UpperCAmelCase ): __a = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(_UpperCAmelCase ): fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head('''s3://huggingface.co''' )
49
0
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    # `dataset_loading_script_dir` is a pytest fixture providing the path to a dummy dataset script.
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # Was a bare `result == expected` comparison, which silently did nothing.
            assert result == expected
327
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _A ( __UpperCAmelCase ): UpperCamelCase__ : Tuple = (DDPMParallelScheduler,) def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]): '''simple docstring''' __a = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE) return config def _lowerCamelCase ( self : List[str]): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Tuple): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = self.dummy_sample_deter + 0.1 __a = self.dummy_sample_deter - 0.1 __a = samplea.shape[0] __a = torch.stack([samplea, samplea, samplea] , dim=0) __a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE) __a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) __a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 11_53.18_33) < 1E-2 assert abs(result_mean.item() - 0.50_05) < 1E-3 def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = 
self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_58.96_06) < 1E-2 assert abs(result_mean.item() - 0.33_72) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config(prediction_type='''v_prediction''') __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = len(__SCREAMING_SNAKE_CASE) __a = self.dummy_model() __a = self.dummy_sample_deter __a = torch.manual_seed(0) for t in reversed(range(__SCREAMING_SNAKE_CASE)): # 1. predict noise residual __a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # 2. predict previous mean of sample x_t-1 __a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample __a = pred_prev_sample __a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE)) __a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 2_02.02_96) < 1E-2 assert abs(result_mean.item() - 0.26_31) < 1E-3 def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) __a = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE): if i == len(__SCREAMING_SNAKE_CASE) - 1: __a = -1 else: __a = timesteps[i + 1] __a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE) __a = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [100, 87, 50, 1, 0] __a = len(__SCREAMING_SNAKE_CASE) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.scheduler_classes[0] __a = self.get_scheduler_config() __a = scheduler_class(**__SCREAMING_SNAKE_CASE) __a = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
49
0
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : List[Any] )->List[Any]: _UpperCAmelCase = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0] _UpperCAmelCase = [2, 4, 6, 8, 1_0, 1_2] _UpperCAmelCase = 1_0_0 self.assertEqual(kp.calc_profit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 2_1_0 ) def lowercase__ ( self : Dict )->Optional[Any]: self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' ) def lowercase__ ( self : Any )->List[Any]: self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Weight can not be negative.''' ) def lowercase__ ( self : Dict )->List[Any]: self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Profit can not be negative.''' ) def lowercase__ ( self : List[str] )->Tuple: self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' ) def lowercase__ ( self : Any )->Optional[Any]: self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''The length of profit and weight must be same.''' ) if __name__ == "__main__": unittest.main()
260
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING __snake_case :List[Any] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _A ( __UpperCAmelCase ): def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple): '''simple docstring''' super().__init__(**__SCREAMING_SNAKE_CASE) requires_backends(self , '''vision''') requires_backends(self , '''torch''') if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.') self.check_model_type(__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = {} __a = {} __a = {} # preprocess args if "points_per_batch" in kwargs: __a = kwargs['''points_per_batch'''] if "points_per_crop" in kwargs: __a = kwargs['''points_per_crop'''] if "crops_n_layers" in kwargs: __a = kwargs['''crops_n_layers'''] if "crop_overlap_ratio" in kwargs: __a = kwargs['''crop_overlap_ratio'''] if "crop_n_points_downscale_factor" in kwargs: __a = kwargs['''crop_n_points_downscale_factor'''] # postprocess args if "pred_iou_thresh" in kwargs: __a = kwargs['''pred_iou_thresh'''] if "stability_score_offset" in kwargs: __a = kwargs['''stability_score_offset'''] if "mask_threshold" in kwargs: __a = kwargs['''mask_threshold'''] if "stability_score_thresh" in kwargs: __a = kwargs['''stability_score_thresh'''] if "crops_nms_thresh" in kwargs: __a = kwargs['''crops_nms_thresh'''] if "output_rle_mask" in kwargs: __a = kwargs['''output_rle_mask'''] if "output_bboxes_mask" in kwargs: __a = kwargs['''output_bboxes_mask'''] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ): '''simple docstring''' __a = load_image(__SCREAMING_SNAKE_CASE) __a = self.image_processor.size['''longest_edge'''] __a , __a , __a , __a = self.image_processor.generate_crop_boxes( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''') with self.device_placement(): if self.framework == "pt": __a = self.get_inference_context() with inference_context(): __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device) __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''')) __a = image_embeddings __a = grid_points.shape[1] __a = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( '''Cannot have points_per_batch<=0. 
Must be >=1 to returned batched outputs. ''' '''To return all points at once, set points_per_batch to None''') for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): __a = grid_points[:, i : i + points_per_batch, :, :] __a = input_labels[:, i : i + points_per_batch] __a = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ): '''simple docstring''' __a = model_inputs.pop('''input_boxes''') __a = model_inputs.pop('''is_last''') __a = model_inputs.pop('''original_sizes''').tolist() __a = model_inputs.pop('''reshaped_input_sizes''').tolist() __a = self.model(**__SCREAMING_SNAKE_CASE) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __a = model_outputs['''pred_masks'''] __a = self.image_processor.post_process_masks( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE) __a = model_outputs['''iou_scores'''] __a , __a , __a = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ): '''simple docstring''' __a = [] __a = [] __a = [] for model_output in model_outputs: all_scores.append(model_output.pop('''iou_scores''')) all_masks.extend(model_output.pop('''masks''')) all_boxes.append(model_output.pop('''boxes''')) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a = torch.cat(__SCREAMING_SNAKE_CASE) __a , __a , __a , __a = self.image_processor.post_process_for_mask_generation( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = defaultdict(__SCREAMING_SNAKE_CASE) for output in model_outputs: for k, v in output.items(): extra[k].append(__SCREAMING_SNAKE_CASE) __a = {} if output_rle_mask: __a = rle_mask if output_bboxes_mask: __a = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
49
0
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) A : Tuple = logging.getLogger() A : Optional[int] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowercase ( __UpperCAmelCase): """simple docstring""" def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) lowerCamelCase__ : List[str] = {"source": "What is love ?", "target": "life"} lowerCamelCase__ : Any = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: lowerCamelCase__ : List[Any] = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__SCREAMING_SNAKE_CASE , f"{split}.{field}" ) , "w" ) as f: f.write(__SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : str = "pytorch" ): '''simple docstring''' lowerCamelCase__ : int = self.get_auto_remove_tmp_dir() lowerCamelCase__ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , "output" ) lowerCamelCase__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , "data" ) self._create_dummy_data(data_dir=__SCREAMING_SNAKE_CASE ) lowerCamelCase__ : List[Any] = f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split() if gpus > 0: testargs.append(f"--gpus={gpus}" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) lowerCamelCase__ : Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env() ) lowerCamelCase__ : str = os.path.join(__SCREAMING_SNAKE_CASE , "metrics.json" ) with open(__SCREAMING_SNAKE_CASE ) as f: lowerCamelCase__ : str = json.load(__SCREAMING_SNAKE_CASE ) return result @require_torch_gpu def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[str] = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[str] = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 
0.2 ) @require_torch_multi_gpu @require_ray def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : str = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
184
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" ConvBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its options no longer match the
        # arguments passed to this tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
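# A minimal usage sketch of the tokenizer above, assuming network access to
# the YituTech/conv-bert-base checkpoint on the Hugging Face Hub (not part
# of the source file itself):
#
#     from transformers import ConvBertTokenizerFast
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     encoded = tokenizer("Hello world", "How are you?")
#     print(encoded["input_ids"])       # [CLS] seq A [SEP] seq B [SEP]
#     print(encoded["token_type_ids"])  # 0s for segment A, 1s for segment B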
49
0
"""simple docstring""" import numpy as np import qiskit def lowercase_ ( _snake_case = 8 ,_snake_case = None ): SCREAMING_SNAKE_CASE__ : Any = np.random.default_rng(seed=_UpperCAmelCase ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. SCREAMING_SNAKE_CASE__ : Union[str, Any] = 6 * key_len # Measurement basis for Alice's qubits. SCREAMING_SNAKE_CASE__ : List[Any] = rng.integers(2 ,size=_UpperCAmelCase ) # The set of states Alice will prepare. SCREAMING_SNAKE_CASE__ : Optional[int] = rng.integers(2 ,size=_UpperCAmelCase ) # Measurement basis for Bob's qubits. SCREAMING_SNAKE_CASE__ : Any = rng.integers(2 ,size=_UpperCAmelCase ) # Quantum Circuit to simulate BB84 SCREAMING_SNAKE_CASE__ : Any = qiskit.QuantumCircuit(_UpperCAmelCase ,name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(_UpperCAmelCase ): if alice_state[index] == 1: bbaa_circ.x(_UpperCAmelCase ) if alice_basis[index] == 1: bbaa_circ.h(_UpperCAmelCase ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(_UpperCAmelCase ): if bob_basis[index] == 1: bbaa_circ.h(_UpperCAmelCase ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. SCREAMING_SNAKE_CASE__ : Union[str, Any] = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. SCREAMING_SNAKE_CASE__ : int = qiskit.execute(_UpperCAmelCase ,_UpperCAmelCase ,shots=1 ,seed_simulator=_UpperCAmelCase ) # Returns the result of measurement. SCREAMING_SNAKE_CASE__ : Union[str, Any] = job.result().get_counts(_UpperCAmelCase ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. SCREAMING_SNAKE_CASE__ : str = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. SCREAMING_SNAKE_CASE__ : Any = gen_key[:key_len] if len(_UpperCAmelCase ) >= key_len else gen_key.ljust(_UpperCAmelCase ,"""0""" ) return key if __name__ == "__main__": print(f"""The generated key is : {bbaa(8, seed=0)}""") from doctest import testmod testmod()
25
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF weights into our EfficientNet structure."""
    # Load the original model and collect its parameters.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
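# The kernel permutations above are the easiest place to introduce a silent
# bug. A small self-contained shape check of the TF-to-PyTorch layout
# conversion, using synthetic data (shapes only, not real weights):

import numpy as np
import torch

# TensorFlow stores conv kernels as (H, W, in_channels, out_channels);
# PyTorch expects (out_channels, in_channels, H, W).
tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)

# Depthwise kernels are (H, W, channels, multiplier) in TF and
# (channels * multiplier, 1, H, W) in PyTorch, hence the different permute.
tf_dw_kernel = np.zeros((3, 3, 16, 1), dtype=np.float32)
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_kernel.shape == (16, 1, 3, 3)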
49
0
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased identifier into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the model support table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in index.md is up to date and maybe `overwrite` it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
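# A quick standalone sanity check of the camel-case splitting used above
# (the function is re-stated here so the snippet runs on its own; the
# example identifiers are illustrative):

import re


def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


assert camel_case_split("BertForMaskedLM") == ["Bert", "For", "Masked", "LM"]
assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]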
58
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
49
0
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint's weights into our Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()

    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
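# A quick check of the key-renaming rules above, using illustrative ParlAI
# key names (assumes `rename_state_dict_key` from the script is importable):
assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == (
    "encoder.layers.0.self_attn.q_proj.weight"
)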
163
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams, ignoring case and spaces."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
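# For comparison, an equivalent implementation using collections.Counter:
# Counter equality checks the same multiset-of-characters condition as the
# defaultdict bookkeeping above (function name is illustrative, not from
# the file itself):

from collections import Counter


def check_anagrams_counter(first: str, second: str) -> bool:
    def normalize(s: str) -> str:
        return s.lower().replace(" ", "")

    return Counter(normalize(first)) == Counter(normalize(second))


assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("there", "their")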
49
0
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
# Distance in metres (m)
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance in metres between two points on a sphere."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
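# A quick usage sketch; the San Francisco / Yosemite coordinates are
# approximate and the expected distance is a ballpark figure:
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)

distance = haversine_distance(*SAN_FRANCISCO, *YOSEMITE)
print(f"{distance / 1000:.2f} km")  # roughly 254 km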
39
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional pruning parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
49
0
"""simple docstring""" def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = [1] for i in range(2 , _UpperCAmelCase ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCamelCase = [] UpperCamelCase = list(range(_UpperCAmelCase ) ) # Find permutation while factorials: UpperCamelCase = factorials.pop() UpperCamelCase , UpperCamelCase = divmod(_UpperCAmelCase , _UpperCAmelCase ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
153
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
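# The copy() method above is a deep copy: mutating a dict-valued field on the
# clone does not leak back into the original. A small demonstration (field
# names follow the reconstruction above and the proxy URL is illustrative):
base = DownloadConfig(proxies={"https": "proxy:3128"})
clone = base.copy()
clone.proxies["https"] = "other:8080"
print(base.proxies["https"])  # still "proxy:3128"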
49
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
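# A minimal usage sketch of the tool above. It assumes the `vision` extra is
# installed and the dandelin/vilt-b32-finetuned-vqa checkpoint is reachable;
# "two_cats.png" is a hypothetical local file:
#
#     from PIL import Image
#
#     tool = ImageQuestionAnsweringTool()
#     image = Image.open("two_cats.png")
#     print(tool(image, "How many cats are there?"))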
6
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
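# A quick illustration of the sparse-step arithmetic above: with 12 layers
# and 3 sparse layers, a sparse mixture-of-experts block occurs every 4th
# layer (values are illustrative; the exact placement of sparse blocks lives
# in the modeling code, not this config):
num_layers, num_sparse_layers = 12, 3
sparse_step = num_layers // num_sparse_layers if num_sparse_layers > 0 else num_layers
print(sparse_step)  # 4 -> one sparse MoE block every 4 layers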
49
0