code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS} def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : List[str] ): if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: __A : Union[str, Any] = TOKENIZER_CLASSES else: __A : Union[str, Any] = {tokenizer_name: getattr(UpperCamelCase__, tokenizer_name + 'Fast' )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: __A : int = TOKENIZER_CLASSES[tokenizer_name] __A : str = True if checkpoint_name is None: __A : int = list(tokenizer_class.max_model_input_sizes.keys() ) else: __A : Dict = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer __A : Dict = tokenizer_class.from_pretrained(UpperCamelCase__, force_download=UpperCamelCase__ ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: __A ,__A : Optional[int] = checkpoint.split('/' ) __A : Any = os.path.join(UpperCamelCase__, UpperCamelCase__ ) elif add_prefix: __A : List[Any] = checkpoint __A : Optional[int] = dump_path else: __A : str = None __A : Optional[int] = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix 
{add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __A : Union[str, Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __A : str = file_path.split(UpperCamelCase__ )[-1][0] if next_char == "/": __A : Optional[Any] = os.path.join(UpperCamelCase__, UpperCamelCase__ ) __A : Union[str, Any] = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) __A : Union[str, Any] = tokenizer.save_pretrained( UpperCamelCase__, legacy_format=UpperCamelCase__, filename_prefix=UpperCamelCase__ ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('tokenizer.json' ): os.remove(UpperCamelCase__ ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.' ) parser.add_argument( '--tokenizer_name', default=None, type=str, help=( f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' 'download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--checkpoint_name', default=None, type=str, help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.', ) parser.add_argument( '--force_download', action='store_true', help='Re-download checkpoints.', ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
365
"""simple docstring""" import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class _lowerCAmelCase ( lowerCamelCase ): def _a ( self ) -> List[str]: _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def _a ( self ) -> Optional[int]: with self.assertRaises(a_ ): _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def _a ( self ) -> int: with self.assertRaises(a_ ): _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) ) def _a ( self ) -> Optional[Any]: _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _a ( self ) -> int: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): _UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) ) def _a ( self ) -> Dict: _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _a ( self ) -> Union[str, Any]: _UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) ) self.assertEqual(arr.type , pa.string() ) def _a ( self ) -> Union[str, Any]: _UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def _a ( self ) -> Tuple: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): _UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , 
type=ArrayaD((1, 3) , "int64" ) ) ) def _a ( self ) -> str: _UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def _a ( self ) -> Tuple: _UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def _a ( self ) -> List[str]: import PIL.Image _UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( "datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects: _UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) ) _UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting" , a_ ) self.assertFalse(kwargs["optimize_list_casting"] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ ) _UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ ) _UpperCAmelCase = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() _UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) 
_UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: _UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __lowerCamelCase ( ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() _UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} ) with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer: writer.write({"labels": 0} ) writer.write({"labels": 1} ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata _UpperCAmelCase = pa.BufferReader(output.getvalue() ) _UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ ) _UpperCAmelCase = f.read_all() _UpperCAmelCase = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(UpperCamelCase__ ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() with ArrowWriter( stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer: with pytest.raises(UpperCamelCase__ ): writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] ) def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() with ArrowWriter( stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , 
hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer: with pytest.raises(UpperCamelCase__ ): writer.write({"col_1": "foo", "col_2": 1} , key=10 ) writer.write({"col_1": "bar", "col_2": 2} , key=10 ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] ) def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() with ArrowWriter( stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer: writer.write({"col_1": "foo", "col_2": 1} , key=1 ) writer.write({"col_1": "bar", "col_2": 2} , key=2 ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() _UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) writer.write_batch({"col_1": [], "col_2": []} ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: _UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , 
[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() _UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: _UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() _UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) ) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: _UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __lowerCamelCase ( ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: 
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()} _UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" ) with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata ) _check_output(UpperCamelCase__ , 1 ) def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" if pa.types.is_list(UpperCamelCase__ ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if isinstance(lst[0] , UpperCamelCase__ ): change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ ) else: _UpperCAmelCase = value @pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype" , [ ("attention_mask", pa.inta()), ("special_tokens_mask", pa.inta()), ("token_type_ids", pa.inta()), ("input_ids", pa.intaa()), ("other", pa.intaa()), ] , ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications _UpperCAmelCase = 
copy.deepcopy(UpperCamelCase__ ) _UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("raise_exception" , [False, True] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = str(tmp_path / "dataset-train.arrow" ) try: with ArrowWriter(path=UpperCamelCase__ ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = "mock://dataset-train.arrow" with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(UpperCamelCase__ ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(UpperCamelCase__ ) def __lowerCamelCase ( ): """simple docstring""" _UpperCAmelCase = pa.BufferOutputStream() with ParquetWriter(stream=UpperCamelCase__ ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) _UpperCAmelCase , _UpperCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _UpperCAmelCase = pa.BufferReader(output.getvalue() ) _UpperCAmelCase = pq.read_table(UpperCamelCase__ ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files" , [False, True] ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" import PIL.Image _UpperCAmelCase = 
str(tmp_path / "test_image_rgb.jpg" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" ) _UpperCAmelCase = pa.BufferOutputStream() with ParquetWriter( stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer: writer.write({"image": image_path} ) writer.finalize() _UpperCAmelCase = pa.BufferReader(output.getvalue() ) _UpperCAmelCase = pq.read_table(UpperCamelCase__ ) _UpperCAmelCase = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"] , UpperCamelCase__ ) with open(UpperCamelCase__ , "rb" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __lowerCamelCase ( ): """simple docstring""" _UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] ) _UpperCAmelCase = pa.BufferOutputStream() with ArrowWriter(stream=UpperCamelCase__ ) as writer: writer._build_writer(inferred_schema=UpperCamelCase__ ) assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
657
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available _lowercase : List[str] = { """configuration_audio_spectrogram_transformer""": [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ASTConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ASTForAudioClassification""", """ASTModel""", """ASTPreTrainedModel""", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = ["""ASTFeatureExtractor"""] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys _lowercase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
210
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class _lowerCAmelCase ( unittest.TestCase ): def _a ( self ) -> Optional[Any]: _UpperCAmelCase = ["a", "b", "c"] # Defaults to last layer if both are None _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ ) self.assertEqual(a_ , ["c"] ) self.assertEqual(a_ , [2] ) # Out indices set to match out features _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ ) self.assertEqual(a_ , ["a", "c"] ) self.assertEqual(a_ , [0, 2] ) # Out features set to match out indices _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ ) self.assertEqual(a_ , ["a", "c"] ) self.assertEqual(a_ , [0, 2] ) # Out features selected from negative indices _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ ) self.assertEqual(a_ , ["a", "c"] ) self.assertEqual(a_ , [-3, -1] ) def _a ( self ) -> Optional[int]: # Stage names must be set with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ ) # Out features must be a list with self.assertRaises(a_ ): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] ) # Out indices must be a list or tuple with self.assertRaises(a_ ): verify_out_features_out_indices(a_ , 0 , ["a", "b"] ) # Out indices must be a subset of stage names with self.assertRaises(a_ ): verify_out_features_out_indices(a_ , (0, 1) , ["a"] ) # Out features and out indices must be the same length with self.assertRaises(a_ ): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] ) # Out features should match out indices with self.assertRaises(a_ ): 
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(a_ ): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] ) def _a ( self ) -> int: _UpperCAmelCase = BackboneMixin() _UpperCAmelCase = ["a", "b", "c"] _UpperCAmelCase = ["a", "c"] _UpperCAmelCase = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly _UpperCAmelCase = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"] ) self.assertEqual(backbone.out_indices , [0, 1] ) _UpperCAmelCase = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [-3, -1] )
657
0
from math import factorial snake_case_ : Union[str, Any] = {str(d): factorial(d) for d in range(10)} def __a ( __UpperCAmelCase : int ) -> int: """simple docstring""" return sum(DIGIT_FACTORIAL[d] for d in str(UpperCamelCase__ ) ) def __a ( ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ : Optional[Any] = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , UpperCamelCase__ ) if sum_of_digit_factorial(UpperCamelCase__ ) == i ) if __name__ == "__main__": print(f"{solution() = }")
488
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if 
TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
657
0
import operator as op def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = lambda lowercase , lowercase : int(x / y ) # noqa: E731 integer division operation SCREAMING_SNAKE_CASE : Any = { "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(UpperCamelCase__ )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(UpperCamelCase__ ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " ) else: SCREAMING_SNAKE_CASE : Any = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " ) SCREAMING_SNAKE_CASE : Optional[Any] = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " ) stack.append( str(opr[x](int(UpperCamelCase__ ) , int(UpperCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": snake_case = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """) print("""\n\tResult = """, solve(Postfix))
62
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ): lowercase_ : Tuple = BarthezTokenizer lowercase_ : List[Any] = BarthezTokenizerFast lowercase_ : Dict = True lowercase_ : int = True def _a ( self ) -> Any: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ ) _UpperCAmelCase = tokenizer def _a ( self ) -> List[Any]: _UpperCAmelCase = "<pad>" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def _a ( self ) -> List[Any]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(a_ ) , 101122 ) def _a ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def _a ( self ) -> List[Any]: _UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" ) self.assertIsInstance(a_ , a_ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(a_ , a_ ) def _a ( self ) -> str: if not self.test_rust_tokenizer: return 
_UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = "I was born in 92000, and this is falsé." _UpperCAmelCase = tokenizer.tokenize(a_ ) _UpperCAmelCase = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) _UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ ) _UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(a_ ) _UpperCAmelCase = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def _a ( self ) -> Dict: # fmt: off _UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. 
_UpperCAmelCase = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
657
0
from maths.prime_factors import prime_factors


def lowercase__(number):
    """Return the Liouville lambda of *number*.

    The Liouville function is -1 when *number* has an odd count of prime
    factors (with multiplicity) and +1 when the count is even.

    Args:
        number: a positive integer.

    Returns:
        -1 or 1.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* < 1.
    """
    # The obfuscated original validated `isinstance(A, A)` and referenced the
    # undefined name `number`; validate the actual parameter instead.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    # Odd number of prime factors -> -1, even -> 1.
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
170
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): _UpperCAmelCase = f"Input value of [number={number}] must be an integer" raise TypeError(UpperCamelCase__ ) if number < 0: return False _UpperCAmelCase = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
657
0
"""ConvBERT model configuration (reconstructed from obfuscated source)."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# The obfuscated original bound the logger and this map to the same name
# (`lowercase`), clobbering the logger; distinct names restore both.
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration class holding the hyper-parameters of a ConvBERT model.

    The obfuscated original declared every __init__ parameter as
    `_SCREAMING_SNAKE_CASE` (a SyntaxError: duplicate argument names) and
    assigned through undefined names; parameter names are reconstructed from
    the attribute assignments in the body.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT.

    Renamed from the original's second `UpperCAmelCase_`, which silently
    shadowed the config class above.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
568
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __magic_name__ = logging.get_logger(__name__) __magic_name__ = Dict[str, Any] __magic_name__ = List[Prediction] @add_end_docstrings(lowerCamelCase ) class _lowerCAmelCase ( lowerCamelCase ): def __init__( self , *a_ , **a_ ) -> Optional[int]: super().__init__(*a_ , **a_ ) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) requires_backends(self , "vision" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _a ( self , **a_ ) -> List[str]: _UpperCAmelCase = {} if "threshold" in kwargs: _UpperCAmelCase = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]: return super().__call__(*a_ , **a_ ) def _a ( self , a_ ) -> Optional[Any]: _UpperCAmelCase = load_image(a_ ) _UpperCAmelCase = torch.IntTensor([[image.height, image.width]] ) _UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" ) if self.tokenizer is not None: _UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" ) _UpperCAmelCase = target_size return inputs def _a ( self , a_ ) -> Optional[Any]: _UpperCAmelCase = model_inputs.pop("target_size" ) _UpperCAmelCase = self.model(**a_ ) _UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} ) if self.tokenizer is not None: _UpperCAmelCase = model_inputs["bbox"] return model_outputs def _a ( self , a_ , a_=0.9 ) -> int: _UpperCAmelCase = 
model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. _UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist() def unnormalize(a_ ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) _UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) _UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] _UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )] _UpperCAmelCase = ["score", "label", "box"] _UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel _UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ ) _UpperCAmelCase = raw_annotations[0] _UpperCAmelCase = raw_annotation["scores"] _UpperCAmelCase = raw_annotation["labels"] _UpperCAmelCase = raw_annotation["boxes"] _UpperCAmelCase = scores.tolist() _UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels] _UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] _UpperCAmelCase = ["score", "label", "box"] _UpperCAmelCase = [ dict(zip(a_ , a_ ) ) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] ) ] return annotation def _a ( self , a_ ) -> Dict[str, int]: if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist() _UpperCAmelCase = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
657
0
"""Lazy import scaffolding for the InstructBLIP model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names; consumed by _LazyModule below.  The
# obfuscated original bound this dict to a throwaway name and then referenced
# the undefined `_import_structure`.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only modeling classes; registered into the structure (the
    # original overwrote the dict variable with this list instead).
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Install the lazy proxy in sys.modules so heavy submodules are only
    # imported on first attribute access (the original assigned the proxy to
    # a local name, which had no effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
117
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(UpperCamelCase__ ) <= 1: return collection _UpperCAmelCase = len(UpperCamelCase__ ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() __magic_name__ = input('''Enter numbers separated by a comma:\n''').strip() __magic_name__ = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
657
0
import qiskit


def quantum_entanglement(qubits=2):
    """Entangle *qubits* qubits into a GHZ-style state and measure them.

    Renamed from the obfuscated `__UpperCAmelCase`: the function body and the
    __main__ block already referenced `qubits` / `quantum_entanglement`,
    which were undefined.

    Args:
        qubits: number of qubits (and classical readout bits) in the circuit.

    Returns:
        The measurement counts from the Aer simulator (1000 shots); only the
        all-zeros and all-ones states should appear for an entangled chain.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate to chain each qubit to its predecessor
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
475
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowerCAmelCase : def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = self.vocab_size - 1 def _a ( self ) -> Union[str, Any]: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , 
self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]: _UpperCAmelCase = OpenAIGPTModel(config=a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ ) _UpperCAmelCase = model(a_ , token_type_ids=a_ ) _UpperCAmelCase = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]: _UpperCAmelCase = OpenAIGPTLMHeadModel(a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]: _UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ ) model.to(a_ ) model.eval() _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self ) -> List[str]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase_ : Any = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowercase_ : Optional[Any] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowercase_ : Union[str, Any] = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self , a_ , a_ , a_=False ) -> Optional[int]: _UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , ) _UpperCAmelCase = inputs_dict["labels"] _UpperCAmelCase = inputs_dict["labels"] _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def _a ( self ) -> Optional[int]: _UpperCAmelCase = OpenAIGPTModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 ) def _a ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _a ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*a_ ) def _a ( self ) -> Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*a_ ) def _a ( self ) -> List[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*a_ ) def _a ( self ) -> List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ ) @slow def _a ( self ) -> int: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): @slow def _a ( self ) -> Any: _UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(a_ ) _UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is 
_UpperCAmelCase = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _UpperCAmelCase = model.generate(a_ , do_sample=a_ ) self.assertListEqual(output_ids[0].tolist() , a_ )
657
0
"""Tests for the DDIM diffusion pipeline (reconstructed from obfuscated source)."""
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Class names disambiguated: the original named both test classes `__A`,
    # so the second silently replaced the first.
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): attribute name reconstructed from an obfuscated `= False`
    # class attribute — confirm against PipelineTesterMixin's expected flags.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
168
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ): """simple docstring""" _UpperCAmelCase = [] for _ in range(UpperCamelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ): """simple docstring""" _UpperCAmelCase = [] for step in range(UpperCamelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCamelCase__ ) _UpperCAmelCase = torch.load(UpperCamelCase__ ) scheduler.load_state_dict(UpperCamelCase__ ) return lrs @require_torch class _lowerCAmelCase ( unittest.TestCase ): def _a ( self , a_ , a_ , a_ ) -> Optional[int]: self.assertEqual(len(a_ ) , len(a_ ) ) for a, b in zip(a_ , a_ ): self.assertAlmostEqual(a_ , a_ , delta=a_ ) def _a ( self ) -> str: _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(a_ , a_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def _a ( self ) -> Union[str, Any]: _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , ) for _ in range(1000 ): _UpperCAmelCase = criterion(a_ , a_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None lowercase_ : Dict = 10 def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]: self.assertEqual(len(a_ ) , len(a_ ) ) for a, b in zip(a_ , a_ ): self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ ) def _a ( self ) -> List[Any]: _UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), 
get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **a_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(a_ , self.num_steps ) self.assertListAlmostEqual( a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) _UpperCAmelCase = scheduler_func(self.optimizer , **a_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps ) self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" ) class _lowerCAmelCase : def __init__( self , a_ ) -> Union[str, Any]: _UpperCAmelCase = fn def __call__( self , *a_ , **a_ ) -> Union[str, Any]: return self.fn(*a_ , **a_ ) @classmethod def _a ( self , a_ ) -> Dict: _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
657
0
"""Compute the length of a circular arc."""
from math import pi


def arc_length(angle, radius):
    """Return the arc length subtended by *angle* degrees on a circle of
    radius *radius*.

    Renamed from the obfuscated `__SCREAMING_SNAKE_CASE`, whose duplicate
    parameter names were a SyntaxError and whose __main__ block already
    called `arc_length`.

    Args:
        angle: central angle in degrees.
        radius: circle radius.

    Returns:
        The arc length (same units as *radius*).
    """
    # Full circumference (2*pi*r) scaled by the fraction of the circle swept.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
620
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __lowerCamelCase ( UpperCamelCase__=None ): """simple docstring""" if subparsers is not None: _UpperCAmelCase = subparsers.add_parser("test" ) else: _UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=UpperCamelCase__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase__ ) return parser def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: _UpperCAmelCase = script_name else: _UpperCAmelCase = f"--config_file={args.config_file} {script_name}" _UpperCAmelCase = ["accelerate-launch"] + test_args.split() _UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! 
You are ready for your distributed training!" ) def __lowerCamelCase ( ): """simple docstring""" _UpperCAmelCase = test_command_parser() _UpperCAmelCase = parser.parse_args() test_command(UpperCamelCase__ ) if __name__ == "__main__": main()
657
0
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Numerical Methods scheduler (F-PNDM).

    Fixes applied: the original had every method and parameter collapsed onto
    the same identifier (duplicate ``lowercase`` arguments are a SyntaxError),
    called the misspelled ``torch.atana`` instead of ``torch.atan2``, and used
    a garbled dtype name in place of ``torch.float32``.
    """

    # Solver order exposed to pipelines.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
    ):
        # `set_timesteps` populates betas/alphas/timesteps from the config.
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values (history of epsilon estimates for the multistep update)
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps used for the diffusion chain (run before inference).

        Args:
            num_inference_steps: number of denoising steps.
            device: device to move the timesteps tensor to.
        """
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        # atan2 maps the (beta, alpha) pair back onto the [0, 1] time axis.
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        # Reset the multistep history whenever the schedule changes.
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Propagate the sample one step backward through the diffusion process.

        Raises:
            ValueError: if `set_timesteps` was never called.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep (Adams-Bashforth) combination of past epsilon
        # estimates; falls back to lower orders while history is short.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """No-op for this scheduler; present for API interchangeability."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Compute the previous sample from the current one and the epsilon estimate."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # max(..., 1e-8) guards the division when alpha approaches zero.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
302
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" return 10 - x * x def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0: raise ValueError("Wrong space!" ) _UpperCAmelCase = a while (b - a) >= 0.01: # Find middle point _UpperCAmelCase = (a + b) / 2 # Check if middle point is root if equation(UpperCamelCase__ ) == 0.0: break # Decide the side to repeat the steps if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0: _UpperCAmelCase = c else: _UpperCAmelCase = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
657
0
"""Testing suite for the PyTorch CTRL model."""

import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configurations and inputs and runs per-head sanity checks.

    Fix: class/method names in this file were collapsed onto mangled
    identifiers, so references such as ``CTRLModelTester`` and
    ``create_and_check_ctrl_model`` could not resolve; restored.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_common_attributes(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
365
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]: super().__init__() _UpperCAmelCase = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) _UpperCAmelCase = prefix_inner_dim _UpperCAmelCase = prefix_hidden_dim _UpperCAmelCase = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) _UpperCAmelCase = ( nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) _UpperCAmelCase = GPTaConfig( vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , ) _UpperCAmelCase = GPTaLMHeadModel(a_ ) def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple: _UpperCAmelCase = self.transformer.transformer.wte(a_ ) _UpperCAmelCase = self.encode_prefix(a_ ) _UpperCAmelCase = self.decode_prefix(a_ ) _UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is 
not None: _UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) _UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 ) _UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _a ( self , a_ , a_ ) -> torch.Tensor: return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ ) def _a ( self , a_ ) -> Union[str, Any]: return self.encode_prefix(a_ ) @torch.no_grad() def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]: _UpperCAmelCase = torch.split(a_ , 1 , dim=0 ) _UpperCAmelCase = [] _UpperCAmelCase = [] for feature in features: _UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature # Only support beam search for now _UpperCAmelCase , _UpperCAmelCase = self.generate_beam( input_embeds=a_ , device=a_ , eos_token_id=a_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) _UpperCAmelCase = torch.stack(a_ ) _UpperCAmelCase = torch.stack(a_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]: _UpperCAmelCase = eos_token_id _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int ) _UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool ) if input_embeds is not None: _UpperCAmelCase = input_embeds else: _UpperCAmelCase = self.transformer.transformer.wte(a_ ) for i in range(a_ ): _UpperCAmelCase = self.transformer(inputs_embeds=a_ ) _UpperCAmelCase = outputs.logits _UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) _UpperCAmelCase = logits.softmax(-1 ).log() if scores is None: _UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 ) _UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] ) _UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 
0 ), scores.squeeze(0 ) if tokens is None: _UpperCAmelCase = next_tokens else: _UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] ) _UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 ) else: _UpperCAmelCase = -float(np.inf ) _UpperCAmelCase = 0 _UpperCAmelCase = scores[:, None] + logits seq_lengths[~is_stopped] += 1 _UpperCAmelCase = scores_sum / seq_lengths[:, None] _UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 ) _UpperCAmelCase = next_tokens // scores_sum.shape[1] _UpperCAmelCase = seq_lengths[next_tokens_source] _UpperCAmelCase = next_tokens % scores_sum.shape[1] _UpperCAmelCase = next_tokens.unsqueeze(1 ) _UpperCAmelCase = tokens[next_tokens_source] _UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 ) _UpperCAmelCase = generated[next_tokens_source] _UpperCAmelCase = scores_sum_average * seq_lengths _UpperCAmelCase = is_stopped[next_tokens_source] _UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) _UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 ) _UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze() if is_stopped.all(): break _UpperCAmelCase = scores / seq_lengths _UpperCAmelCase = scores.argsort(descending=a_ ) # tokens tensors are already padded to max_seq_length _UpperCAmelCase = [tokens[i] for i in order] _UpperCAmelCase = torch.stack(a_ , dim=0 ) _UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
657
0
"""Lazy import structure for the LayoutLMv2 model family.

Fixes: every assignment target was collapsed onto the mangled name
``_lowercase`` so ``_import_structure`` was never populated, the
``_LazyModule`` instance was discarded instead of being installed into
``sys.modules``, and the ``TYPE_CHECKING`` imports referenced garbled
``LayoutLMva`` names inconsistent with the declared ``LayoutLMv2`` keys.
"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

# Each optional backend contributes its submodules only when importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
210
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable __magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['''GPTNeoXTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXForCausalLM''', '''GPTNeoXForQuestionAnswering''', '''GPTNeoXForSequenceClassification''', '''GPTNeoXForTokenClassification''', '''GPTNeoXLayer''', '''GPTNeoXModel''', '''GPTNeoXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
657
0
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    """Numerical parity tests between Flax (bfloat16) and reference torch (float16) UNet outputs.

    Fixes: mangled class/method names restored; garbled dtype names
    (``floataa``/``bfloataa``/``intaa``) restored to float32/bfloat16/int32;
    ``get_file_format`` formatted an unrelated name instead of the loop
    variable ``s``.
    """

    def get_file_format(self, seed, shape):
        # Encode the fixture's seed and tensor shape into its .npy filename.
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
488
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _lowerCAmelCase ( lowerCamelCase ): lowercase_ : Union[str, Any] = '''convbert''' def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple: super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = embedding_size _UpperCAmelCase = head_ratio _UpperCAmelCase = conv_kernel_size _UpperCAmelCase = num_groups _UpperCAmelCase = classifier_dropout class _lowerCAmelCase ( lowerCamelCase ): @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", 
dynamic_axis), ("token_type_ids", dynamic_axis), ] )
657
0
"""Extract selected BERT layers into a DistilBERT-shaped state dict for
transfer-learned distillation.

Fix: every destination key of the compressed state dict was lost in the
original (bare assignments to one mangled name); destinations are
reconstructed following the DistilBERT parameter naming
(q_lin/k_lin/v_lin/out_lin, sa_layer_norm, ffn.lin1/lin2,
output_layer_norm, vocab_projector/transform/layer_norm).
"""

import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied as-is.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]

    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Keep every other teacher layer (6 of 12) for the student.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
62
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] ) def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" if (len(UpperCamelCase__ ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
657
0
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

# DialoGPT checkpoints store the LM head under the old fairseq-style key.
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Load a DialoGPT ``*_ft.pkl`` checkpoint, rename the LM-head key, and
    save the result under ``WEIGHTS_NAME`` in *pytorch_dump_folder_path*.

    Bug fixes vs. the obfuscated original:
    - the function was defined under a garbage name while the ``__main__``
      block called ``convert_dialogpt_checkpoint`` (NameError);
    - the tensor popped from ``OLD_KEY`` was discarded instead of being
      re-inserted under ``NEW_KEY``, silently dropping the LM-head weight.
    """
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
170
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" try: _UpperCAmelCase = float(UpperCamelCase__ ) except ValueError: raise ValueError("Please enter a valid number" ) _UpperCAmelCase = decimal - int(UpperCamelCase__ ) if fractional_part == 0: return int(UpperCamelCase__ ), 1 else: _UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] ) _UpperCAmelCase = int(decimal * (10**number_of_frac_digits) ) _UpperCAmelCase = 10**number_of_frac_digits _UpperCAmelCase , _UpperCAmelCase = denominator, numerator while True: _UpperCAmelCase = dividend % divisor if remainder == 0: break _UpperCAmelCase , _UpperCAmelCase = divisor, remainder _UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor return int(UpperCamelCase__ ), int(UpperCamelCase__ ) if __name__ == "__main__": print(f'''{decimal_to_fraction(2) = }''') print(f'''{decimal_to_fraction(89.0) = }''') print(f'''{decimal_to_fraction("67") = }''') print(f'''{decimal_to_fraction("45.0") = }''') print(f'''{decimal_to_fraction(1.5) = }''') print(f'''{decimal_to_fraction("6.25") = }''') print(f'''{decimal_to_fraction("78td") = }''')
657
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it exports.
# Bug fix: the obfuscated original assigned the backend-specific export lists
# to throwaway names instead of dict keys, and referenced an undefined
# `_import_structure` when building the lazy module.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime users go through the
    # lazy module below.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
568
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" _UpperCAmelCase = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] _UpperCAmelCase = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } _UpperCAmelCase = f"{src_lang}-{tgt_lang}" _UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training 
data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n" model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ ) _UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" ) print(f"Generating {path}" ) with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f: f.write(UpperCamelCase__ ) # make sure we are under the root of the project __magic_name__ = Path(__file__).resolve().parent.parent.parent __magic_name__ = repo_dir / '''model_cards''' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: __magic_name__ = model_cards_dir / '''allenai''' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
657
0
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


# Bug fix: the obfuscated original bound both this map and the logger to the
# same name, so the logger overwrote the archive map.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    """Configuration class for MaskFormer (backbone + DETR-style decoder).

    NOTE(review): reconstructed from an obfuscated source in which every
    parameter and every ``self.*`` assignment target had been destroyed; the
    signature below follows the defaults visible in the original
    (256, 256, 0.1, False, None, None, 0.02, 1.0, 1.0, 1.0, 20.0, None) —
    confirm against upstream ``transformers``.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ) -> int:
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        """Build a config directly from backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize, expanding the nested backbone/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
117
"""simple docstring""" from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=lowerCamelCase ): lowercase_ : Dict = ['''torch''', '''torchsde'''] def __init__( self , *a_ , **a_ ) -> Optional[int]: requires_backends(self , ["torch", "torchsde"] ) @classmethod def _a ( cls , *a_ , **a_ ) -> Optional[Any]: requires_backends(cls , ["torch", "torchsde"] ) @classmethod def _a ( cls , *a_ , **a_ ) -> List[Any]: requires_backends(cls , ["torch", "torchsde"] )
657
0
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


# NOTE(review): this file appears machine-obfuscated and cannot run as
# written: every parameter in the `__init__` below is named `_lowercase`
# (duplicate argument names are a SyntaxError), all methods share the name
# `_UpperCAmelCase` (later defs shadow earlier ones on the class), all local
# assignment targets were collapsed to `UpperCAmelCase__`, and bodies
# reference names that are never bound (`size`, `parent`, `batched`, `a_`,
# `image_processor`, ...). Comments below describe the evident intent only.
class lowercase__ ( unittest.TestCase ):
    # Helper that holds the kwargs for building a DeformableDetrImageProcessor
    # and computes the output sizes the processor is expected to produce.
    def __init__( self : List[Any] , _lowercase : List[str] , _lowercase : List[str]=7 , _lowercase : Any=3 , _lowercase : Any=30 , _lowercase : Dict=4_00 , _lowercase : List[Any]=True , _lowercase : Union[str, Any]=None , _lowercase : str=True , _lowercase : List[str]=[0.5, 0.5, 0.5] , _lowercase : List[Any]=[0.5, 0.5, 0.5] , _lowercase : Optional[Any]=True , _lowercase : Union[str, Any]=1 / 2_55 , _lowercase : Optional[Any]=True , ):
        """simple docstring"""
        # Default follows the DETR convention: shortest edge 18, longest 1333.
        UpperCAmelCase__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = batch_size
        UpperCAmelCase__ = num_channels
        UpperCAmelCase__ = min_resolution
        UpperCAmelCase__ = max_resolution
        UpperCAmelCase__ = do_resize
        UpperCAmelCase__ = size
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = image_mean
        UpperCAmelCase__ = image_std
        UpperCAmelCase__ = do_rescale
        UpperCAmelCase__ = rescale_factor
        UpperCAmelCase__ = do_pad

    def _UpperCAmelCase ( self : Dict ):
        """simple docstring"""
        # Kwargs dict for constructing the image processor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def _UpperCAmelCase ( self : Union[str, Any] , _lowercase : str , _lowercase : str=False ):
        """simple docstring"""
        # Expected (height, width) after aspect-preserving shortest-edge resize;
        # in batched mode, the max over the per-image expectations (padding).
        if not batched:
            UpperCAmelCase__ = image_inputs[0]
            if isinstance(a_ , Image.Image ):
                UpperCAmelCase__ , UpperCAmelCase__ = image.size
            else:
                UpperCAmelCase__ , UpperCAmelCase__ = image.shape[1], image.shape[2]
            if w < h:
                UpperCAmelCase__ = int(self.size["shortest_edge"] * h / w )
                UpperCAmelCase__ = self.size["shortest_edge"]
            elif w > h:
                UpperCAmelCase__ = self.size["shortest_edge"]
                UpperCAmelCase__ = int(self.size["shortest_edge"] * w / h )
            else:
                UpperCAmelCase__ = self.size["shortest_edge"]
                UpperCAmelCase__ = self.size["shortest_edge"]
        else:
            UpperCAmelCase__ = []
            for image in image_inputs:
                UpperCAmelCase__ , UpperCAmelCase__ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase__ = max(a_ , key=lambda _lowercase : item[0] )[0]
            UpperCAmelCase__ = max(a_ , key=lambda _lowercase : item[1] )[1]
        return expected_height, expected_width


@require_torch
@require_vision
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # NOTE(review): the mixin base was obfuscated to `__SCREAMING_SNAKE_CASE`
    # (undefined here); presumably ImageProcessingSavingTestMixin — confirm.
    A__= DeformableDetrImageProcessor if is_vision_available() else None

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """simple docstring"""
        UpperCAmelCase__ = DeformableDetrImageProcessingTester(self )

    @property
    def _UpperCAmelCase ( self : List[Any] ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """simple docstring"""
        # The processor should expose all its configuration attributes.
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a_ , "image_mean" ) )
        self.assertTrue(hasattr(a_ , "image_std" ) )
        self.assertTrue(hasattr(a_ , "do_normalize" ) )
        self.assertTrue(hasattr(a_ , "do_resize" ) )
        self.assertTrue(hasattr(a_ , "do_rescale" ) )
        self.assertTrue(hasattr(a_ , "do_pad" ) )
        self.assertTrue(hasattr(a_ , "size" ) )

    def _UpperCAmelCase ( self : Optional[int] ):
        """simple docstring"""
        # from_dict should honour size / max_size / pad overrides.
        UpperCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , a_ )
        UpperCAmelCase__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a_ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , a_ )

    def _UpperCAmelCase ( self : str ):
        """simple docstring"""
        pass

    def _UpperCAmelCase ( self : List[str] ):
        """simple docstring"""
        # PIL input, unbatched and batched: output shape must match the
        # expected resize computed by the tester helper.
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , Image.Image )
        # Test not batched input
        UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(a_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
        UpperCAmelCase__ = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _UpperCAmelCase ( self : Optional[int] ):
        """simple docstring"""
        # Same checks with numpy array inputs.
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray )
        # Test not batched input
        UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(a_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase__ = image_processing(a_ , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _UpperCAmelCase ( self : Tuple ):
        """simple docstring"""
        # Same checks with PyTorch tensor inputs.
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor )
        # Test not batched input
        UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(a_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase__ = image_processing(a_ , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def _UpperCAmelCase ( self : Dict ):
        """simple docstring"""
        # Golden-value regression test for COCO detection annotations.
        UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            UpperCAmelCase__ = json.loads(f.read() )
        UpperCAmelCase__ = {"image_id": 3_97_69, "annotations": target}
        # encode them
        UpperCAmelCase__ = DeformableDetrImageProcessor()
        UpperCAmelCase__ = image_processing(images=a_ , annotations=a_ , return_tensors="pt" )
        # verify pixel values
        UpperCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , a_ )
        UpperCAmelCase__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a_ , atol=1E-4 ) )
        # verify area
        UpperCAmelCase__ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a_ ) )
        # verify boxes
        UpperCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , a_ )
        UpperCAmelCase__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a_ , atol=1E-3 ) )
        # verify image_id
        UpperCAmelCase__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a_ ) )
        # verify is_crowd
        UpperCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a_ ) )
        # verify class_labels
        UpperCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a_ ) )
        # verify orig_size
        UpperCAmelCase__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a_ ) )
        # verify size
        UpperCAmelCase__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a_ ) )

    @slow
    def _UpperCAmelCase ( self : Dict ):
        """simple docstring"""
        # Golden-value regression test for COCO panoptic annotations.
        UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            UpperCAmelCase__ = json.loads(f.read() )
        UpperCAmelCase__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        UpperCAmelCase__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        UpperCAmelCase__ = DeformableDetrImageProcessor(format="coco_panoptic" )
        UpperCAmelCase__ = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors="pt" )
        # verify pixel values
        UpperCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , a_ )
        UpperCAmelCase__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a_ , atol=1E-4 ) )
        # verify area
        UpperCAmelCase__ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a_ ) )
        # verify boxes
        UpperCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , a_ )
        UpperCAmelCase__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a_ , atol=1E-3 ) )
        # verify image_id
        UpperCAmelCase__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a_ ) )
        # verify is_crowd
        UpperCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a_ ) )
        # verify class_labels
        UpperCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a_ ) )
        # verify masks
        UpperCAmelCase__ = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a_ )
        # verify orig_size
        UpperCAmelCase__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a_ ) )
        # verify size
        UpperCAmelCase__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a_ ) )
475
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __magic_name__ = logging.get_logger(__name__) class _lowerCAmelCase ( lowerCamelCase ): lowercase_ : Optional[Any] = '''upernet''' def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]: super().__init__(**a_ ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(a_ , a_ ): _UpperCAmelCase = backbone_config.get("model_type" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(a_ ) _UpperCAmelCase = backbone_config _UpperCAmelCase = hidden_size _UpperCAmelCase = initializer_range _UpperCAmelCase = pool_scales _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_in_channels _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = loss_ignore_index def _a ( self ) -> int: _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
657
0
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


# NOTE(review): this file appears machine-obfuscated and cannot run as
# written: the base class `__magic_name__` is undefined (presumably
# SchedulerCommonTest — confirm), both class attributes are bound to the same
# name `snake_case`, every method is named `_snake_case` (later defs shadow
# earlier ones on the class), and bodies reference names that are never bound
# (`config`, `a_`, `scheduler`, `model`, `sample`, ...). Comments describe
# the evident intent only.
class __A (__magic_name__ ):
    # Scheduler class(es) under test and the number of inference steps.
    snake_case :Optional[int] = (EulerDiscreteScheduler,)
    snake_case :List[Any] = 10

    def _snake_case ( self , **UpperCamelCase_ ):
        # Default scheduler config, with keyword overrides applied on top.
        __UpperCAmelCase : int = {
            "num_train_timesteps": 11_00,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
        }
        config.update(**a_ )
        return config

    def _snake_case ( self ):
        # Sweep num_train_timesteps.
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=a_ )

    def _snake_case ( self ):
        # Sweep beta_start/beta_end pairs.
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=a_ , beta_end=a_ )

    def _snake_case ( self ):
        # Sweep beta schedules.
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=a_ )

    def _snake_case ( self ):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a_ )

    def _snake_case ( self ):
        # Full denoising loop; checks sum/mean of the final sample against
        # golden values.
        __UpperCAmelCase : Tuple = self.scheduler_classes[0]
        __UpperCAmelCase : Optional[int] = self.get_scheduler_config()
        __UpperCAmelCase : str = scheduler_class(**a_ )
        scheduler.set_timesteps(self.num_inference_steps )
        __UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        __UpperCAmelCase : Union[str, Any] = self.dummy_model()
        __UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __UpperCAmelCase : List[Any] = sample.to(a_ )
        for i, t in enumerate(scheduler.timesteps ):
            __UpperCAmelCase : Tuple = scheduler.scale_model_input(a_ , a_ )
            __UpperCAmelCase : Any = model(a_ , a_ )
            __UpperCAmelCase : Optional[int] = scheduler.step(a_ , a_ , a_ , generator=a_ )
            __UpperCAmelCase : str = output.prev_sample
        __UpperCAmelCase : str = torch.sum(torch.abs(a_ ) )
        __UpperCAmelCase : Optional[int] = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3

    def _snake_case ( self ):
        # Same loop with prediction_type="v_prediction" golden values.
        __UpperCAmelCase : Any = self.scheduler_classes[0]
        __UpperCAmelCase : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction" )
        __UpperCAmelCase : Optional[int] = scheduler_class(**a_ )
        scheduler.set_timesteps(self.num_inference_steps )
        __UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __UpperCAmelCase : str = self.dummy_model()
        __UpperCAmelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        __UpperCAmelCase : Optional[Any] = sample.to(a_ )
        for i, t in enumerate(scheduler.timesteps ):
            __UpperCAmelCase : List[str] = scheduler.scale_model_input(a_ , a_ )
            __UpperCAmelCase : Optional[int] = model(a_ , a_ )
            __UpperCAmelCase : List[Any] = scheduler.step(a_ , a_ , a_ , generator=a_ )
            __UpperCAmelCase : Optional[int] = output.prev_sample
        __UpperCAmelCase : Dict = torch.sum(torch.abs(a_ ) )
        __UpperCAmelCase : Tuple = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 0.0_0_0_2 ) < 1E-2
        assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3

    def _snake_case ( self ):
        # Same loop with timesteps placed on the target device.
        __UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        __UpperCAmelCase : int = self.get_scheduler_config()
        __UpperCAmelCase : int = scheduler_class(**a_ )
        scheduler.set_timesteps(self.num_inference_steps , device=a_ )
        __UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
        __UpperCAmelCase : Union[str, Any] = self.dummy_model()
        __UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        __UpperCAmelCase : Union[str, Any] = sample.to(a_ )
        for t in scheduler.timesteps:
            __UpperCAmelCase : int = scheduler.scale_model_input(a_ , a_ )
            __UpperCAmelCase : Union[str, Any] = model(a_ , a_ )
            __UpperCAmelCase : int = scheduler.step(a_ , a_ , a_ , generator=a_ )
            __UpperCAmelCase : str = output.prev_sample
        __UpperCAmelCase : Dict = torch.sum(torch.abs(a_ ) )
        __UpperCAmelCase : Optional[int] = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1E-2
        assert abs(result_mean.item() - 0.0_1_3_1 ) < 1E-3

    def _snake_case ( self ):
        # Same loop with Karras sigmas enabled; distinct golden values.
        __UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
        __UpperCAmelCase : List[Any] = self.get_scheduler_config()
        __UpperCAmelCase : Optional[int] = scheduler_class(**a_ , use_karras_sigmas=a_ )
        scheduler.set_timesteps(self.num_inference_steps , device=a_ )
        __UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        __UpperCAmelCase : List[str] = self.dummy_model()
        __UpperCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        __UpperCAmelCase : Optional[Any] = sample.to(a_ )
        for t in scheduler.timesteps:
            __UpperCAmelCase : Optional[Any] = scheduler.scale_model_input(a_ , a_ )
            __UpperCAmelCase : str = model(a_ , a_ )
            __UpperCAmelCase : Dict = scheduler.step(a_ , a_ , a_ , generator=a_ )
            __UpperCAmelCase : int = output.prev_sample
        __UpperCAmelCase : List[str] = torch.sum(torch.abs(a_ ) )
        __UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1E-2
        assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1E-3
168
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import _LazyModule __magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
657
0
from __future__ import annotations

from collections.abc import MutableSequence


class UpperCAmelCase_:
    """A univariate polynomial: ``coefficients[i]`` is the coefficient of x**i.

    Restored from a name-mangled original whose methods had duplicate
    parameter names (a SyntaxError), referenced the undefined name
    ``polynomial_a``, and returned the wrong objects from ``__add__``/``__mul__``.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Build a polynomial of the given degree.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: UpperCAmelCase_) -> UpperCAmelCase_:
        """Return the sum; result degree is the max of the two degrees."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return UpperCAmelCase_(self.degree, coefficients)
        coefficients = polynomial_2.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return UpperCAmelCase_(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: UpperCAmelCase_) -> UpperCAmelCase_:
        # Subtraction implemented as addition of the negation (multiply by -1).
        return self + polynomial_2 * UpperCAmelCase_(0, [-1])

    def __neg__(self) -> UpperCAmelCase_:
        return UpperCAmelCase_(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: UpperCAmelCase_) -> UpperCAmelCase_:
        """Return the product via coefficient convolution."""
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return UpperCAmelCase_(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at ``substitution`` (Horner not used upstream)."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                # BUG FIX: original appended str(SCREAMING_SNAKE_CASE_)
                # (an undefined name) instead of the exponent `i`.
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> UpperCAmelCase_:
        """Return the first derivative (degree drops by one)."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return UpperCAmelCase_(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> UpperCAmelCase_:
        """Return the antiderivative with integration constant ``constant``."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return UpperCAmelCase_(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, UpperCAmelCase_):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
658
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets

# Module constants restored: the original assigned all three strings to the
# single name `__snake_case`, while the decorator below referenced
# _DESCRIPTION / _KWARGS_DESCRIPTION (undefined).
_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Accuracy on the MATH dataset after canonicalizing LaTeX answers."""

    # BUG FIX: the two hooks below were both named `UpperCAmelCase`, so the
    # datasets.Metric machinery (which calls _info / _compute) never saw them.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return {"accuracy": fraction of predictions equivalent to references}.

        BUG FIX: the original called is_equiv on the (duplicate-named)
        parameters instead of the zipped pair (i, j).
        """
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
658
1
import argparse
import re

import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

# Substring renames applied to every checkpoint key (CLAP -> transformers).
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original CLAP model (HTSAT-tiny audio tower + roberta text tower)."""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    """Translate a CLAP state dict into transformers' ClapModel layout.

    Handles: substring renames, `sequential.N.` -> `layers.N//3.linear.`,
    `_projection.N.` -> `_projection.linearM.`, and splitting fused audio
    qkv weights into separate query/key/value tensors.
    """
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(
                f"sequential.{sequential_layer}.",
                f"layers.{int(sequential_layer)//3}.linear.",
            )
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(
                f"_projection.{projecton_layer}.",
                f"_projection.linear{transformers_projection_layer}.",
            )

        # BUG FIX: original condition was `"audio" and "qkv" in key`, which is
        # just `"qkv" in key` because the literal "audio" is always truthy.
        if "audio" in key and "qkv" in key:
            # split fused qkv into query, key and value tensors
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert a CLAP checkpoint and save model + config to `pytorch_dump_folder_path`.

    BUG FIX: all functions in this file were mangled to the single name `_A`,
    so the `__main__` call below never resolved.
    """
    clap_model, _clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    # NOTE(review): the mangled original assigned `enable_fusion` to a
    # throwaway name; the audio config is where fusion lives — confirm.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
658
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow

# Make each Flax example script importable as a module.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    """Return the value of the `-f` CLI flag (pytest passes the test file path)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load `{split}_results.json` written by an example script run.

    BUG FIX: this helper was mangled to `_A` while every test below calls
    `get_results(...)`.
    """
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    # BUG FIX: every method below was named `UpperCAmelCase`, so only the last
    # definition survived; names restored so each test is collected and the
    # `patch.object(sys, "argv", ...)` target is the `sys` module.
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the
        # score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
658
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    """Processor round-trip tests for ChineseCLIP.

    Restored from a name-mangled original in which every method shared the
    name `UpperCAmelCase` (so only the last survived) and `np.uint8` had been
    mangled to the nonexistent `np.uinta`.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        # BUG FIX: the join target was a mangled placeholder; the imported
        # FEATURE_EXTRACTOR_NAME is the file from_pretrained looks for.
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random 30x400 RGB PIL image (channels-first array moved to HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
658
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between ``fnc``, the x-axis, x_start and x_end
    using the trapezoidal rule with ``steps`` equal-width segments.

    BUG FIX: the function was mangled to `_A` while the demo below calls
    `trapezoidal_area`, and all loop locals shared a single placeholder name.
    """
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x: int) -> int:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
658
1
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel

# All helpers in the mangled original were named `_A`, while their bodies call
# `load_config`, `get_obj_from_str`, `instantiate_from_config` and
# `load_model_from_config`; canonical names restored.


def load_config(config_path, display=False):
    """Load an OmegaConf config from ``config_path``; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a taming VQModel from a config + checkpoint and move it to ``device``."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode ``x`` to the VQGAN latent space and decode it back."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Cls"`` to the attribute object."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """Instantiate ``config.target`` with ``config.params`` (taming convention)."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from ``config``, optionally load weights / move to GPU / eval()."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
658
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for the consistency-model stochastic iterative scheduler."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        # Default scheduler config; any field can be overridden via kwargs.
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        # Stepping must preserve the sample shape at every timestep.
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Regression values pinned from the reference implementation.
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Not monotonically decreasing -> must be rejected.
        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # A timestep equal to num_train_timesteps is out of range.
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
658
1
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Project Euler problem 63: count positive integers that are both an
    nth power and exactly n digits long (e.g. 16807 = 7**5 has 5 digits).

    Args:
        max_base: bases 1 .. max_base - 1 are considered (exclusive bound).
        max_power: powers 1 .. max_power - 1 are considered (exclusive bound).

    Returns:
        The number of (base, power) pairs where base**power has `power` digits.

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
658
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy import structure: maps submodule name -> public symbols it provides.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply skip registering the PyTorch models.
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: skip registering the TF models.
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
1
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    """Rename a single OpenAI Jukebox state-dict key to the HF naming scheme."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rewrite an original Jukebox ``state_dict`` into HF key layout.

    ``mapping`` is filled with new_key -> original_key so the conversion can be
    audited afterwards.  Keys whose target is missing or shape-mismatched are
    reported and kept under their original name.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI Jukebox checkpoints, convert them to HF format and save.

    Returns the list of converted prior state dicts (the vqvae dict is popped off).
    """
    # Download any checkpoint file not already present locally.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # NOTE(review): the renamed target keys below were reconstructed from the
        # canonical converter (.b -> bias, .w -> weight, cond.model blocks) —
        # confirm against the upstream script.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First dict is the vqvae; the rest are priors, stored deepest-first.
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
658
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Dummy model whose forward signature has all ONNX inputs contiguous."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy model with an extra parameter interleaved between ONNX inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export ``model`` to ONNX in a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
658
1
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch __snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Any =['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = size if size is not None else {'''shortest_edge''': 224} UpperCamelCase :Any = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256} UpperCamelCase :Any = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) UpperCamelCase :Optional[Any] = do_resize UpperCamelCase :Dict = size UpperCamelCase :Tuple = resample UpperCamelCase :List[Any] = do_rescale UpperCamelCase :Tuple = rescale_factor UpperCamelCase :List[Any] = do_center_crop UpperCamelCase :Dict = crop_size UpperCamelCase :Any = do_flip_channel_order def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) 
-> np.ndarray: UpperCamelCase :Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) UpperCamelCase :int = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray: UpperCamelCase :int = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Dict: return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> np.ndarray: return flip_channel_order(SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ) -> PIL.Image.Image: UpperCamelCase 
:List[Any] = do_resize if do_resize is not None else self.do_resize UpperCamelCase :str = resample if resample is not None else self.resample UpperCamelCase :List[str] = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase :List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase :Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase :Union[str, Any] = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) UpperCamelCase :List[str] = size if size is not None else self.size UpperCamelCase :Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = crop_size if crop_size is not None else self.crop_size UpperCamelCase :Dict = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) UpperCamelCase :List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) # All transformations expect numpy arrays. 
UpperCamelCase :List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: UpperCamelCase :Tuple = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: UpperCamelCase :List[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: UpperCamelCase :str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: UpperCamelCase :List[str] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images] UpperCamelCase :Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] UpperCamelCase :List[str] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Union[str, Any]: UpperCamelCase :int = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Union[str, Any] = target_sizes.numpy() UpperCamelCase :Dict = [] for idx in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase :Optional[int] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase :Optional[Any] = logits.argmax(dim=1 ) UpperCamelCase :List[Any] = [semantic_segmentation[i] for i in 
range(semantic_segmentation.shape[0] )] return semantic_segmentation
658
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase_ ( lowercase ): """simple docstring""" def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Union[str, Any] = tempfile.mkdtemp() UpperCamelCase :List[str] = 5 # Realm tok UpperCamelCase :List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def UpperCAmelCase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records ) return config def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :Tuple = Dataset.from_dict( { '''id''': ['''0''', '''1'''], 
'''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def UpperCAmelCase ( self ) -> str: UpperCamelCase :Optional[Any] = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=SCREAMING_SNAKE_CASE_ , ) return block_records def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Optional[int] = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Optional[Any] = self.get_config() UpperCamelCase :str = self.get_dummy_retriever() UpperCamelCase :int = retriever.tokenizer UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' ) UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids UpperCamelCase :Tuple = tokenizer( ['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids UpperCamelCase :Optional[Any] = config.reader_seq_len UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', 
'''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def UpperCAmelCase ( self ) -> int: UpperCamelCase :Union[str, Any] = self.get_config() UpperCamelCase :Union[str, Any] = self.get_dummy_retriever() UpperCamelCase :Dict = retriever.tokenizer UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' ) UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids UpperCamelCase :Optional[Any] = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids UpperCamelCase :Any = config.reader_seq_len UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :str = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: UpperCamelCase :Tuple = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , 
_REALM_BLOCK_RECORDS_FILENAME ) UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
658
1
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
658
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]: UpperCamelCase :int = parent UpperCamelCase :List[Any] = batch_size UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[int] = max_length UpperCamelCase :Union[str, Any] = num_mel_bins UpperCamelCase :Optional[int] = is_training UpperCamelCase :Dict = use_labels UpperCamelCase :Dict = hidden_size UpperCamelCase :Optional[int] = num_hidden_layers UpperCamelCase :str = num_attention_heads UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[str] = hidden_act UpperCamelCase :List[str] = 
hidden_dropout_prob UpperCamelCase :List[Any] = attention_probs_dropout_prob UpperCamelCase :str = type_sequence_label_size UpperCamelCase :List[Any] = initializer_range UpperCamelCase :Union[str, Any] = scope UpperCamelCase :List[Any] = frequency_stride UpperCamelCase :Tuple = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension UpperCamelCase :Optional[int] = num_patches + 2 def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCamelCase :Tuple = None if self.use_labels: UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :str = self.get_config() return config, input_values, labels def UpperCAmelCase ( self ) -> List[Any]: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = config_and_inputs UpperCamelCase :List[Any] = {'''input_values''': input_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCamelCase_ : Any =( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : Dict =False def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = ASTModelTester(self ) UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def UpperCAmelCase ( self ) -> str: pass def UpperCAmelCase ( self ) -> int: UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def UpperCAmelCase ( self ) -> Tuple: 
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase :Any = [*signature.parameters.keys()] UpperCamelCase :Optional[int] = ['''input_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Optional[int]: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _A ( ): UpperCamelCase :Any = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' ) UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Tuple: return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = self.default_feature_extractor UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.default_feature_extractor UpperCamelCase , UpperCamelCase :Dict = prepare_audio() UpperCamelCase :Dict = audio.squeeze().numpy() UpperCamelCase :int = 
feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase :List[Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
658
1
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case = """▁""" __snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any =BigBirdTokenizer UpperCamelCase_ : Union[str, Any] =BigBirdTokenizerFast UpperCamelCase_ : str =True UpperCamelCase_ : Tuple =True def UpperCAmelCase ( self ) -> Optional[int]: super().setUp() UpperCamelCase :Any = self.tokenizer_class(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Dict = '''<s>''' UpperCamelCase :Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1004 ) def UpperCAmelCase ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def UpperCAmelCase ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return UpperCamelCase :Optional[Any] = self.get_tokenizer() UpperCamelCase :Tuple = self.get_rust_tokenizer() UpperCamelCase :Dict = '''I was born in 92000, and this is falsé.''' UpperCamelCase :str = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = 
rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = self.get_rust_tokenizer() UpperCamelCase :Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :Tuple = BigBirdTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [285, 46, 10, 170, 382] , ) UpperCamelCase :Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCamelCase :str = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCamelCase :str = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + '''I''', 
SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def UpperCAmelCase ( self ) -> List[str]: return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Any = '''Hello World!''' UpperCamelCase :Optional[int] = [65, 1_8536, 2260, 101, 66] self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :List[Any] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off UpperCamelCase :Tuple = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @require_torch @slow def UpperCAmelCase ( self ) -> Optional[Any]: import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence UpperCamelCase :Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCamelCase :Dict = ''' '''.join(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.big_tokenizer.batch_encode_plus( [sequence + 
''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = BigBirdConfig(attention_type='''original_full''' ) UpperCamelCase :Optional[Any] = BigBirdModel(SCREAMING_SNAKE_CASE_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**SCREAMING_SNAKE_CASE_ ) model(**SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Any = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) UpperCamelCase :Dict = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def UpperCAmelCase ( self ) -> List[str]: # fmt: off UpperCamelCase :Optional[Any] = {'''input_ids''': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
658
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ): return image elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ): UpperCamelCase :Dict = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 ) UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0 UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 ) UpperCamelCase :Tuple = 2.0 * image - 1.0 UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) elif isinstance(image[0] , torch.Tensor ): UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) return image def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ): if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ): UpperCamelCase :int = True UpperCamelCase :Dict = va.device UpperCamelCase :List[Any] = va.cpu().numpy() UpperCamelCase :str = va.cpu().numpy() UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) ) if np.abs(SCREAMING_SNAKE_CASE__ ) > 
DOT_THRESHOLD: UpperCamelCase :Any = (1 - t) * va + t * va else: UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Union[str, Any] = theta_a * t UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a UpperCamelCase :Union[str, Any] = sa * va + sa * va if inputs_are_torch: UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) return va def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ): UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 ) UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ): for param in model.parameters(): UpperCamelCase :Any = value class UpperCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str: super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Union[str, Any] = ( feature_extractor.size if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ ) else feature_extractor.size['''shortest_edge'''] ) UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , 
std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ ) set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> int: self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> int: set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: # get the original timestep using init_timestep UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 ) UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int: if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' ) UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :int = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in 
range(SCREAMING_SNAKE_CASE_ ) ] UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) else: UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCamelCase :List[str] = 0.1_8215 * init_latents UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) # get latents UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = init_latents return latents def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half() UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def UpperCAmelCase ( self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]: UpperCamelCase :List[str] = latents.detach().requires_grad_() UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep] UpperCamelCase :Optional[int] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :str = self.scheduler.sigmas[index] UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred else: raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCamelCase :int = 1 / 0.1_8215 * sample UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype ) UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , 
keepdim=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0] if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Dict = latents.detach() + grads * (sigma**2) UpperCamelCase :Optional[Any] = noise_pred_original else: UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size: raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1: UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1) UpperCamelCase :Tuple = [ ('''model''', self.coca_model is None), ('''tokenizer''', self.coca_tokenizer is None), ('''transform''', self.coca_transform is None), ] UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]] UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ ) # generate prompts with coca model if 
prompt is None if content_prompt is None: if len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ ) if style_prompt is None: if len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ ) # get prompt text embeddings for content and style UpperCamelCase :List[Any] = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] UpperCamelCase :List[Any] = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) # set timesteps UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) UpperCamelCase :List[str] = {} if accepts_offset: UpperCamelCase :Tuple = 1 self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) UpperCamelCase , UpperCamelCase :Tuple = 
self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device ) UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ ) # Preprocess image UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = self.prepare_latents( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = self.prepare_latents( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if clip_guidance_scale > 0: UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = slerp( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCamelCase :Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase :Any = content_text_input.input_ids.shape[-1] UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ) UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase :int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCamelCase :str = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase :Dict = {} if accepts_eta: UpperCamelCase :int = eta # check if the scheduler accepts generator UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: UpperCamelCase :List[str] = generator with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ): for i, t in enumerate(SCREAMING_SNAKE_CASE_ ): # expand the latents if we are doing classifier free guidance UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform classifier free guidance if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 ) UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: UpperCamelCase :int = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) UpperCamelCase , UpperCamelCase :str = self.cond_fn( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCamelCase :List[Any] = 1 / 0.1_8215 * 
latents UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
658
1
"""Collect pytest JSON-report logs and post a nightly summary to Slack.

Reads every `*.log` file in the working directory (one JSON record per line,
as produced by pytest's report-log plugin), tallies failures per log, prints a
markdown summary, and — when TEST_TYPE is set — posts the summary (plus one
threaded message per failing file) to the #accelerate-ci-daily channel.
"""
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

# Minimal table format that renders cleanly inside Slack ``` code blocks.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {
    "type": "section",
    "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True},
}

# Slack message blocks, starting with a header describing this test run.
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    # The log has been consumed; remove it so reruns don't double-count.
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of failure instances per file.
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        # Slack section blocks are limited; truncate and point at the CI run.
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of each test class name in the column.
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
658
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of ``nums`` (in index order) whose elements sum to ``max_sum``.

    Args:
        nums: candidate numbers; each element may be used at most once.
        max_sum: the target sum.

    Returns:
        A list of subsets (each a list of ints) summing exactly to ``max_sum``.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first search over the subset state-space tree.

    Prunes a branch as soon as the partial sum overshoots ``max_sum`` or the
    remaining numbers can no longer reach it; appends ``path`` to ``result``
    when the target is hit exactly.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],  # fresh list per branch; safe to store in result
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
658
1
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray ): return input_array.reshape((input_array.size, 1) ) def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :Tuple = np.nan for i in range(SCREAMING_SNAKE_CASE__ ): UpperCamelCase :Optional[int] = features[:, labels == i] UpperCamelCase :Dict = data.mean(1 ) # Centralize the data of class i UpperCamelCase :Tuple = data - column_reshape(SCREAMING_SNAKE_CASE__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) UpperCamelCase :Optional[Any] = np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T ) return covariance_sum / features.shape[1] def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :List[str] = features.mean(1 ) UpperCamelCase :Tuple = np.nan for i in range(SCREAMING_SNAKE_CASE__ ): UpperCamelCase :Tuple = features[:, labels == i] UpperCamelCase :Any = data.shape[1] UpperCamelCase :List[Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ ) , (column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) UpperCamelCase :Any = device_data * np.dot( column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ ) , (column_reshape(SCREAMING_SNAKE_CASE__ ) - column_reshape(SCREAMING_SNAKE_CASE__ )).T , ) return covariance_sum / features.shape[1] def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ): # Check if the features have been loaded if features.any(): UpperCamelCase :int = features.mean(1 ) # Center the dataset UpperCamelCase :Any = features - np.reshape(SCREAMING_SNAKE_CASE__ , (data_mean.size, 1) ) UpperCamelCase :str = np.dot(SCREAMING_SNAKE_CASE__ , centered_data.T ) / features.shape[1] UpperCamelCase , UpperCamelCase :Dict = np.linalg.eigh(SCREAMING_SNAKE_CASE__ ) # Take all the columns in the reverse order (-1), and then takes only the first UpperCamelCase :Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space UpperCamelCase :str = np.dot(filtered_eigenvectors.T , SCREAMING_SNAKE_CASE__ ) logging.info('''Principal Component Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=SCREAMING_SNAKE_CASE__ ) logging.error('''Dataset empty''' ) raise AssertionError def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): assert classes > dimensions # Check if features have been already loaded if features.any: UpperCamelCase , UpperCamelCase :str = eigh( covariance_between_classes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , covariance_within_classes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) UpperCamelCase :Any = eigenvectors[:, ::-1][:, :dimensions] UpperCamelCase , UpperCamelCase , UpperCamelCase :Tuple = np.linalg.svd(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :str = svd_matrix[:, 0:dimensions] UpperCamelCase :int = 
np.dot(filtered_svd_matrix.T , SCREAMING_SNAKE_CASE__ ) logging.info('''Linear Discriminant Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=SCREAMING_SNAKE_CASE__ ) logging.error('''Dataset empty''' ) raise AssertionError def _A ( ): # Create dummy dataset with 2 classes and 3 features UpperCamelCase :Optional[Any] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) UpperCamelCase :Optional[Any] = np.array([0, 0, 0, 1, 1] ) UpperCamelCase :List[Any] = 2 UpperCamelCase :Any = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(SCREAMING_SNAKE_CASE__ ) as error_info: UpperCamelCase :List[Any] = linear_discriminant_analysis( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ): raise AssertionError( '''Did not raise AssertionError for dimensions > classes''' ) assert error_info.type is AssertionError def _A ( ): UpperCamelCase :Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) UpperCamelCase :Optional[Any] = 2 UpperCamelCase :str = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] ) with pytest.raises(SCREAMING_SNAKE_CASE__ ) as error_info: UpperCamelCase :Union[str, Any] = principal_component_analysis(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
658
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is a bouncy number.

    A bouncy number's digits are neither monotonically increasing nor
    monotonically decreasing (Project Euler 112).

    Raises:
        ValueError: if ``n`` is not an integer.
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    # Not sorted ascending AND not sorted descending -> bouncy.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches ``percent`` percent.

    Raises:
        ValueError: if ``percent`` is not strictly between 0 and 100.
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
658
1
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazily-imported public API of the (deprecated) Trajectory Transformer model.
# NOTE: the obfuscated original assigned both this dict and the torch-only
# list to the same name, and `_LazyModule` referenced an undefined
# `_import_structure`; this restores the standard transformers lazy-init
# pattern.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

# The modeling module requires torch; register it only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
def _A ( SCREAMING_SNAKE_CASE__ : str ): UpperCamelCase :Union[str, Any] = hex_num.strip() if not hex_num: raise ValueError('''No value was passed to the function''' ) UpperCamelCase :str = hex_num[0] == '''-''' if is_negative: UpperCamelCase :Union[str, Any] = hex_num[1:] try: UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ , 16 ) except ValueError: raise ValueError('''Invalid value was passed to the function''' ) UpperCamelCase :Dict = '''''' while int_num > 0: UpperCamelCase :Tuple = str(int_num % 2 ) + bin_str int_num >>= 1 return int(('''-''' + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
658
1
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 * 4 , SCREAMING_SNAKE_CASE_=32 * 6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=32 , ) -> List[Any]: UpperCamelCase :Any = parent UpperCamelCase :Any = batch_size UpperCamelCase :Any = is_training UpperCamelCase :Union[str, Any] = use_auxiliary_loss UpperCamelCase :Tuple = num_queries UpperCamelCase :Any = num_channels UpperCamelCase :List[str] = min_size UpperCamelCase :Dict = max_size UpperCamelCase :List[str] = num_labels UpperCamelCase :Tuple = mask_feature_size def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ ) > 0.5 ).float() UpperCamelCase :Any = (torch.rand((self.batch_size, 
self.num_labels) , device=SCREAMING_SNAKE_CASE_ ) > 0.5).long() UpperCamelCase :Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase ( self ) -> str: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[Any] = self.prepare_config_and_inputs() UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCamelCase :int = output.encoder_hidden_states UpperCamelCase :Union[str, Any] = output.pixel_decoder_hidden_states UpperCamelCase :Union[str, Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , config.decoder_config.decoder_layers ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Tuple: with torch.no_grad(): UpperCamelCase :List[str] = MaskFormerModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase :Optional[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) # the correct shape of 
output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: UpperCamelCase :Any = MaskFormerForInstanceSegmentation(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE_ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCamelCase :Dict = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) comm_check_on_output(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = model( pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ) 
comm_check_on_output(SCREAMING_SNAKE_CASE_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : int =(MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () UpperCamelCase_ : str =( {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : str =False UpperCamelCase_ : Optional[int] =False def UpperCAmelCase ( self ) -> str: UpperCamelCase :Any = MaskFormerModelTester(self ) UpperCamelCase :Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def UpperCAmelCase ( self ) -> Union[str, Any]: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def UpperCAmelCase ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def UpperCAmelCase ( self ) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers 
using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCAmelCase ( self ) -> Dict: pass def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase , UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase :Any = [*signature.parameters.keys()] UpperCamelCase :Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Any: for model_name in ["facebook/maskformer-swin-small-coco"]: UpperCamelCase :Dict = MaskFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :List[str] = (self.model_tester.min_size,) * 2 UpperCamelCase :List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE_ ), '''mask_labels''': torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE_ ), '''class_labels''': torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE_ ).long(), } UpperCamelCase :List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = model(**SCREAMING_SNAKE_CASE_ ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase , UpperCamelCase 
:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Tuple = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase ( self ) -> Dict: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss UpperCamelCase :Optional[int] = self.all_model_classes[1] UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() UpperCamelCase :Any = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.train() UpperCamelCase :str = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ).loss loss.backward() def UpperCAmelCase ( self ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss UpperCamelCase :Optional[int] = self.all_model_classes[1] UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() UpperCamelCase :Any = True UpperCamelCase :Optional[Any] = True UpperCamelCase :str = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.train() UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCamelCase :int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't UpperCamelCase :Any = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCamelCase :Optional[int] = 
outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __snake_case = 1E-4 def _A ( ): UpperCamelCase :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Tuple: return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Optional[int] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.default_image_processor UpperCamelCase :Any = prepare_img() UpperCamelCase :Tuple = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 800, 1088) ) with torch.no_grad(): UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase :Dict = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , 
atol=SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase :Optional[Any] = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(SCREAMING_SNAKE_CASE_ ) .eval() ) UpperCamelCase :Union[str, Any] = self.default_image_processor UpperCamelCase :Union[str, Any] = prepare_img() UpperCamelCase :Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 800, 1088) ) with torch.no_grad(): UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ) # masks_queries_logits UpperCamelCase :int = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) UpperCamelCase :Any = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] UpperCamelCase :List[str] = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) ) # class_queries_logits UpperCamelCase :Union[str, Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) UpperCamelCase :Tuple = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], 
[1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase ( self ) -> int: UpperCamelCase :Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(SCREAMING_SNAKE_CASE_ ) .eval() ) UpperCamelCase :Dict = self.default_image_processor UpperCamelCase :int = prepare_img() UpperCamelCase :Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 800, 1088) ) with torch.no_grad(): UpperCamelCase :str = model(**SCREAMING_SNAKE_CASE_ ) # masks_queries_logits UpperCamelCase :Any = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) UpperCamelCase :Optional[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] UpperCamelCase :str = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) ) # class_queries_logits UpperCamelCase :Any = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) UpperCamelCase :Tuple = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase ( self ) -> Tuple: 
UpperCamelCase :Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(SCREAMING_SNAKE_CASE_ ) .eval() ) UpperCamelCase :List[str] = self.default_image_processor UpperCamelCase :Optional[Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , ) UpperCamelCase :Tuple = inputs['''pixel_values'''].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['''mask_labels''']] UpperCamelCase :Any = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['''class_labels''']] with torch.no_grad(): UpperCamelCase :str = model(**SCREAMING_SNAKE_CASE_ ) self.assertTrue(outputs.loss is not None )
658
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from *position* that stay on an n x n board.

    Args:
        position: current (y, x) square.
        n: board side length.
    """
    # NOTE: the obfuscated original defined every function here as `_A`
    # while the call sites kept the real names, so the module raised
    # NameError; the original names are restored throughout.
    y, x = position
    # The eight L-shaped knight moves.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of the board has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], position: tuple[int, int], curr: int
) -> bool:
    """Try to extend a partial tour from *position*; backtrack on failure.

    Args:
        board: visit-order grid, 0 marks an unvisited square (mutated in place).
        position: square reached at step *curr*.
        curr: number of squares visited so far.
    """
    if is_complete(board):
        return True
    for next_position in get_valid_pos(position, len(board)):
        y, x = next_position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, next_position, curr + 1):
                return True
            board[y][x] = 0  # undo the move (backtrack)
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns:
        A board whose entries are the visit order (1..n*n).

    Raises:
        ValueError: if no open tour exists for the given board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    # Fixed typo in the original message ("Kight" -> "Knight").
    msg = f'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = """▁""" __snake_case = {"""vocab_file""": """sentencepiece.bpe.model"""} __snake_case = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } __snake_case = { """xlm-roberta-base""": 5_12, """xlm-roberta-large""": 5_12, """xlm-roberta-large-finetuned-conll02-dutch""": 5_12, """xlm-roberta-large-finetuned-conll02-spanish""": 5_12, """xlm-roberta-large-finetuned-conll03-english""": 5_12, """xlm-roberta-large-finetuned-conll03-german""": 5_12, } class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Tuple =VOCAB_FILES_NAMES UpperCamelCase_ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , 
SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase :int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token UpperCamelCase :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase :Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token UpperCamelCase :Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCamelCase :int = 1 UpperCamelCase :List[str] = len(self.sp_model ) + self.fairseq_offset UpperCamelCase :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Any: UpperCamelCase :Any = self.__dict__.copy() UpperCamelCase :Any = None UpperCamelCase :Tuple = self.sp_model.serialized_model_proto() return state def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase :List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCamelCase :str = {} UpperCamelCase :str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase :Union[str, Any] = [self.cls_token_id] UpperCamelCase :int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]: UpperCamelCase :Tuple = [self.sep_token_id] UpperCamelCase :List[str] = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase ( self ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCamelCase :List[Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase :List[Any] = ''''''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , ''' ''' ).strip() return out_string def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase :int = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with 
open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi: UpperCamelCase :Optional[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
658
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    """Unit tests for GenerationConfig serialization, update and defaults."""

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        # Round-trip through save_pretrained/from_pretrained, optionally under
        # a non-default file name.
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging-endpoint tests for pushing a GenerationConfig to the Hub."""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of repos the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
658
1
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop (in place) fairseq bookkeeping entries that have no HF counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename (in place) fairseq parameter names to the HF naming scheme."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares the embedding's weight tensor."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq Speech2Text checkpoint into a HF model and save it.

    :param checkpoint_path: path to the fairseq ``.pt`` checkpoint.
    :param pytorch_dump_folder_path: output directory for the converted model.
    :raises ValueError: if unexpected weights are missing after conversion.
    """
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    # Keep the LM head weights before remove_ignore_keys_ drops them.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    # Only the (recomputed) sinusoidal position weights may legitimately be missing.
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
658
def _A ( SCREAMING_SNAKE_CASE__ : int ): if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise ValueError('''Length must be a positive integer.''' ) return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
658
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Any ='vivit' def __init__( self , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=[2, 16, 16] , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu_fast" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-06 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]: UpperCamelCase :List[str] = hidden_size UpperCamelCase :int = num_hidden_layers UpperCamelCase :List[Any] = num_attention_heads UpperCamelCase :Optional[Any] = intermediate_size UpperCamelCase :List[Any] = hidden_act UpperCamelCase :Any = hidden_dropout_prob UpperCamelCase :Tuple = attention_probs_dropout_prob UpperCamelCase :Dict = initializer_range UpperCamelCase :Tuple = layer_norm_eps UpperCamelCase :str = image_size UpperCamelCase :str = num_frames UpperCamelCase :List[str] = tubelet_size UpperCamelCase :List[str] = num_channels UpperCamelCase :int = qkv_bias super().__init__(**SCREAMING_SNAKE_CASE_ )
658
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


# Plain-English language name -> NLLB-200 (FLORES-200) language code.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng",
    "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn",
    "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn",
    "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn",
    "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn",
    "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn",
    "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva",
    "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn",
    "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn",
    "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn",
    "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn",
    "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn",
}


class TranslationTool(PipelineTool):
    """
    Tool wrapping an NLLB-200 checkpoint to translate text between two
    languages identified by their plain-English names.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        # Validate the plain-English names before looking up the NLLB codes.
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
658
1
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name fragment -> HF module path ("*" is the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Targets that live on the model root rather than under `wav2vec2_conformer.`.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute reached by walking `key` on `hf_pointer`.

    `weight_type` selects which tensor of the target module receives the value
    (weight/bias/norm statistics/...); None writes to the module's own data.
    Raises ValueError on a shape mismatch.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every fairseq parameter into `hf_model`; log weights left unused."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index is the segment right before the key fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/norm weight into the HF module."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Convert a fairseq Wav2Vec2-Conformer checkpoint (optionally fine-tuned for
    CTC, with its fairseq dictionary) into a HF model saved at
    `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
658
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# NOTE(review): the original blob defined every function as `_A`, while the
# internal call sites used the names below — restored from those call sites.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of ``array[left:right]`` for *target*.

    Returns the index of *target*, or -1 when it is absent. The *right*
    bound is exclusive.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted *array*.

    Falls back to :func:`lin_search` once the window shrinks below
    ``precision``. Returns the index of *target*, or -1 when not found.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        # while-else: loop condition failed without finding the target
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over ``array[left:right]`` (sorted).

    Returns the index of *target*, or -1 when not found.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    # Bug fix: both results were stored in the same mangled variable, so the
    # "Iterative search" line printed the recursive result.
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
658
1
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map

# NOTE(review): the original text was identifier-mangled (all functions named
# `_A`, lambdas binding a parameter they never used). Names below were
# restored from in-module call sites and lambda bodies; names with no call
# site (`chunk_layer`, `ChunkSizeTuner`, `tune_chunk_size`) follow the
# conventional upstream naming — confirm against the canonical source.


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the ``.shape`` of every tensor in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for shape *dims*."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return a minimal list of slice tuples covering [start, end] (inclusive)
    in a tensor of shape *dims*, modeled as a tree of subtensors."""

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to ``t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]``
    without materializing the full flattened tensor."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run *layer* over *inputs* in chunks of ``chunk_size`` along the
    flattened leading ``no_batch_dims`` batch dimensions, reassembling the
    outputs into a single tree of tensors.

    When ``low_mem`` is True, inputs are not pre-expanded/flattened; each
    chunk is sliced out lazily via :func:`_chunk_slice`.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    # Ceiling division: one extra chunk for the remainder, if any
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast rather than sliced
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    """Empirically picks the largest chunk size (up to ``max_chunk_size``)
    that a representative callable can run without raising ``RuntimeError``
    (e.g. CUDA OOM), caching the result keyed on the argument shapes."""

    def __init__(
        self,
        # TODO: Make this configurable in the config
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Binary-search the power-of-two candidates for the largest viable chunk size."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Recursively compare two cached argument trees for equality."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                # Bug fix: the mangled source compared a value to itself here
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        """Return a viable chunk size, re-tuning only when the argument
        shapes/values differ from the cached ones."""
        consistent = True
        # Tensors are represented by their shapes when caching args
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
658
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem.

    Returns the maximum revenue obtainable from a rod of length *n* given a
    list of *prices* where ``prices[i - 1]`` is the price of a piece of
    length ``i``.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod-cutting solution."""
    _enforce_args(n, prices)
    # -inf marks sub-problems that have not been solved yet
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for :func:`top_down_cut_rod`; *max_rev* is the memo table."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) rod-cutting solution."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Raise ``ValueError`` for a negative *n* or too few prices for length *n*."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    """Sanity-check that all three implementations agree on a known instance."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
658
1
import heapq as hq
import math
from collections.abc import Iterator

# NOTE(review): the original blob named every function `_A` and both methods
# `UpperCAmelCase`. `Vertex`, `add_neighbor` and `add_edge` are restored from
# annotations and call sites; the two Prim variants are named by convention.


class Vertex:
    """Graph vertex for Prim's algorithm.

    Attributes: `id` (string id), `key` (current best edge weight), `pi`
    (MST parent), `neighbors` (adjacent vertices), `edges` ({vertex id: weight}).
    """

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected edge of weight *edge* between vertices a and b (1-indexed)."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prims(graph: list, root: Vertex) -> list:
    """Prim's MST using a plain list as the priority queue (O(V^2)).

    Returns a list of ``(child_id + 1, parent_id + 1)`` tuples for every
    vertex after the first one in *graph*.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prims_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's MST using a binary heap; yields the same tuples as :func:`prims`.

    The heap is re-heapified after each key decrease, since heapq has no
    decrease-key operation.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# NOTE(review): map name reconstructed by convention — confirm against upstream.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


# Bug fix: the mangled source declared duplicate base classes (`lowercase,
# lowercase`, both undefined) and gave every __init__ parameter the same name
# (a SyntaxError). Bases are restored from the imports above; parameter names
# are restored from the attribute each one is assigned to.
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model (``model_type="focalnet"``).

    Stores image/patch geometry, per-stage depths and focal levels/windows,
    regularization and normalization options, and the backbone output
    feature/index selection handled by ``BackboneConfigMixin``.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],  # mutable defaults kept to match upstream config semantics
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # "stem" plus one named stage per entry in `depths`
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
658
1
import requests
from bs4 import BeautifulSoup  # bug fix: the package import name is `bs4`, not `bsa`


def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by N" text for a Google Scholar lookup.

    Fetches *base_url* with the given query *params* and scrapes the first
    result entry. Raises ``AttributeError`` if the expected result markup
    (``div.gs_ri`` / ``div.gs_fl``) is absent.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    result_div = soup.find("div", attrs={"class": "gs_ri"})
    # The third anchor in the result's footer row is the "Cited by N" link.
    anchors = result_div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
658
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


# NOTE(review): the original text was identifier-mangled — `__init__` had 20
# parameters all named `SCREAMING_SNAKE_CASE_` (a SyntaxError) and every
# method was named `UpperCAmelCase`, so only the last definition survived and
# unittest would discover no tests. Parameter and method names are restored
# from the attribute assignments and in-file call sites; unittest `test_*`
# names follow the standard transformers test-suite naming — confirm against
# upstream before merging.
class DPTModelTester:
    """Builds small DPT(-hybrid) configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
658
1
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCAmelCase_ : """simple docstring""" UpperCamelCase_ : List[Any] =LEDConfig UpperCamelCase_ : Optional[Any] ={} UpperCamelCase_ : int ='gelu' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=4 , ) -> int: UpperCamelCase :Optional[int] = parent UpperCamelCase :str = batch_size UpperCamelCase :List[str] = seq_length UpperCamelCase :Union[str, Any] = is_training UpperCamelCase :Optional[Any] = use_labels UpperCamelCase :List[Any] = vocab_size UpperCamelCase :str = hidden_size UpperCamelCase :Optional[Any] = num_hidden_layers UpperCamelCase :Union[str, Any] = num_attention_heads UpperCamelCase :Union[str, Any] = intermediate_size UpperCamelCase :str = hidden_dropout_prob UpperCamelCase :str = attention_probs_dropout_prob UpperCamelCase :str = max_position_embeddings UpperCamelCase :List[str] = eos_token_id UpperCamelCase :Dict = pad_token_id UpperCamelCase :Optional[int] = bos_token_id UpperCamelCase :Optional[int] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but 
TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after UpperCamelCase :Optional[int] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests UpperCamelCase :Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase :Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase :Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase :Dict = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) UpperCamelCase :int = prepare_led_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = tf.concat( [tf.zeros_like(SCREAMING_SNAKE_CASE_ )[:, :-1], tf.ones_like(SCREAMING_SNAKE_CASE_ )[:, -1:]] , axis=-1 , ) UpperCamelCase :Dict = 
global_attention_mask return config, inputs_dict def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase :Optional[int] = TFLEDModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder() UpperCamelCase :Any = inputs_dict['''input_ids'''] UpperCamelCase :Any = input_ids[:1, :] UpperCamelCase :str = inputs_dict['''attention_mask'''][:1, :] UpperCamelCase :Optional[int] = 1 # first forward pass UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase , UpperCamelCase :Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase :Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase :Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCamelCase :Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCamelCase :str = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCamelCase :Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCamelCase :List[str] = output_from_no_past[:, -3:, random_slice_idx] UpperCamelCase :List[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-3 ) def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , ): if 
attention_mask is None: UpperCamelCase :Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCamelCase :Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCamelCase :Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCamelCase :Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Tuple =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCamelCase_ : Any =(TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCamelCase_ : Any =( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] =True UpperCamelCase_ : int =False UpperCamelCase_ : Any =False UpperCamelCase_ : Tuple =False def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :List[str] = TFLEDModelTester(self ) UpperCamelCase :str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() 
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase :Union[str, Any] = tf.zeros_like(inputs_dict['''attention_mask'''] ) UpperCamelCase :Dict = 2 UpperCamelCase :Union[str, Any] = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) UpperCamelCase :Union[str, Any] = True UpperCamelCase :str = self.model_tester.seq_length UpperCamelCase :Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(SCREAMING_SNAKE_CASE_ ): UpperCamelCase :List[Any] = outputs.decoder_attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Optional[Any] = [t.numpy() for t in outputs.encoder_attentions] UpperCamelCase :int = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: UpperCamelCase :Union[str, Any] = True UpperCamelCase :Optional[Any] = False UpperCamelCase :int = False UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase :Dict = 
len(SCREAMING_SNAKE_CASE_ ) self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE_ ) check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ ) if self.is_encoder_decoder: UpperCamelCase :Tuple = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE_ ) check_decoder_attentions_output(SCREAMING_SNAKE_CASE_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCamelCase :str = True UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE_ ) check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ ) # Check attention is always last and order is fine UpperCamelCase :Tuple = True UpperCamelCase :int = True UpperCamelCase :Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = model(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(model.config.output_hidden_states , SCREAMING_SNAKE_CASE_ ) check_encoder_attentions_output(SCREAMING_SNAKE_CASE_ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase ( self ) -> Optional[int]: pass def UpperCAmelCase ( self ) -> int: # TODO: Head-masking not yet implement pass def _A ( SCREAMING_SNAKE_CASE__ : Tuple ): return tf.constant(SCREAMING_SNAKE_CASE__ , dtype=tf.intaa ) __snake_case = 1E-4 @slow @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Optional[Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' 
).led # change to intended input here UpperCamelCase :Union[str, Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) UpperCamelCase :Optional[Any] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) UpperCamelCase :Union[str, Any] = prepare_led_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase :List[Any] = (1, 1024, 768) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) # change to expected output here UpperCamelCase :Union[str, Any] = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here UpperCamelCase :Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) UpperCamelCase :Optional[Any] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) UpperCamelCase :Dict = prepare_led_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = model(**SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase :Optional[Any] = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) # change to expected output here UpperCamelCase :List[str] = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-3 , rtol=1e-3 )
658
"""Convert original Audio Spectrogram Transformer (AST) checkpoints to HF format."""

import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated source named every function `_A` while the call
# sites (and the `__main__` guard) used the real identifiers below; names were
# restored from those call sites so the script resolves again.


def get_audio_spectrogram_transformer_config(model_name):
    """Build an ASTConfig matching the given original checkpoint name."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map one original AST parameter name to its HF equivalent."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys; split each fused qkv matrix into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])  # module.v.blocks.<N>....
            dim = config.hidden_size
            # NOTE(review): the target key strings were lost in the obfuscation
            # (assignments collapsed to throwaway locals); the layer-prefixed
            # q/k/v keys below follow the ViT-style AST layout — confirm
            # against the upstream conversion script.
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop the original distillation/classification heads that HF does not use."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure,
    verify outputs on a sample input, and optionally save / push the result.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
658
1
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


# NOTE(review): the obfuscated source named all enum members `UpperCamelCase_`
# and all methods `UpperCAmelCase` (shadowing each other), and passed the
# undefined name `lowercase` to the decorator.  Identifiers were restored from
# their use sites (`ReturnType.TENSORS/NEW_TEXT/FULL_TEXT`, `self.XL_PREFIX`,
# the pipeline hook names `preprocess`/`_forward`/`postprocess`).
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language-model text generation pipeline (causal LM head)."""

    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
658
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table: maps submodule name -> public names it provides.
# Optional entries are appended below only when their backend is installed.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow tokenizer requires sentencepiece.
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the `tokenizers` library.
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors _import_structure.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import (
            LlamaForCausalLM,
            LlamaForSequenceClassification,
            LlamaModel,
            LlamaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
1
from __future__ import annotations import os from typing import Any import requests __snake_case = """https://api.github.com""" # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user __snake_case = BASE_URL + """/user""" # https://github.com/settings/tokens __snake_case = os.environ.get("""USER_TOKEN""", """""") def _A ( SCREAMING_SNAKE_CASE__ : str ): UpperCamelCase :List[str] = { '''Authorization''': F'''token {auth_token}''', '''Accept''': '''application/vnd.github.v3+json''', } return requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(f'''{key}: {value}''') else: raise ValueError("""'USER_TOKEN' field cannot be empty.""")
658
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets

_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = R"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting \"1/2\" to \"\\frac{1}{2}\")

Examples:
    >>> metric = datasets.load_metric(\"competition_math\")
    >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Accuracy metric for the MATH dataset, with LaTeX-aware canonicalization."""

    def _info(self):
        # Declares the metric's schema: both predictions and references are strings.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return {"accuracy": fraction of predictions equivalent to references}.

        Equivalence is decided by ``math_equivalence.is_equiv``, which
        canonicalizes LaTeX before comparing.
        """
        n_correct = 0.0
        for prediction, reference in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(prediction, reference) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
658
1
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    """hf_hub_url must build the canonical resolve URL, defaulting the revision
    to "main" and URL-quoting the file path (e.g. blanks in filenames)."""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
658
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow

# Make each example script importable as a top-level module.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` command-line flag (pytest passes the test file)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load ``{split}_results.json`` written by an example script into a dict.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    """Smoke tests: run each Flax example end-to-end on tiny fixtures and
    check the reported metrics clear a loose threshold."""

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
658
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for RoCBert models.

    Stores the standard BERT-style hyperparameters plus RoCBert-specific
    pronunciation/shape embedding settings; defaults match
    weiweishi/roc-bert-base-zh.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings
        # and whether they are concatenated with the word embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
658
from __future__ import annotations from collections.abc import Callable def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ): UpperCamelCase :Optional[Any] = x_start UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Optional[int] = 0.0 for _ in range(SCREAMING_SNAKE_CASE__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCamelCase :Any = (x_end - x_start) / steps + xa UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCamelCase :Optional[int] = xa UpperCamelCase :List[str] = fxa return area if __name__ == "__main__": def _A ( SCREAMING_SNAKE_CASE__ : int ): return x**3 + x**2 print("""f(x) = x^3 + x^2""") print("""The area between the curve, x = -5, x = 5 and the x axis is:""") __snake_case = 10 while i <= 10_00_00: print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
658
1
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCAmelCase_(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder conditioned on a prefix embedding.

    A (CLIP-style) feature vector is projected into the GPT-2 embedding
    space (``encode_prefix`` / ``decode_prefix``) and prepended to the token
    embeddings; captions are produced with beam search.
    """

    # GPT-2 attention bias buffers are recomputed, not loaded.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        # A hidden projection is mandatory whenever the prefix dimension
        # differs from the GPT-2 embedding dimension.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """Run GPT-2 on [projected prefix ; token embeddings].

        Returns ``(out, hidden)`` when a hidden projection exists (hidden is
        the encoded prefix), otherwise just the GPT-2 output.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Pad the label sequence with zeros over the prefix positions.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        """Zero token ids covering the prefix positions (used as ignored labels)."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project a raw prefix feature into the hidden prefix space."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per feature row via beam search.

        Returns ``(generated_tokens, generated_seq_lengths)`` stacked over
        the batch; only the best beam of each feature is kept.
        """
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        eos_token_id: Optional[int] = None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
    ):
        """Beam search over the GPT-2 decoder.

        Returns the ``beam_size`` token sequences sorted by normalized score
        (best first) together with their lengths.
        """
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                # First step: expand the single hypothesis into beam_size beams.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams contribute only a zero-score continuation.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(
                generated.shape[0], 1, -1
            )
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
658
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase_(SchedulerCommonTest):
    """Tests for the consistency-models stochastic iterative scheduler."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable through keyword arguments."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        # Stepping at two different timesteps must keep the sample shape.
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
658
1
def _A ( SCREAMING_SNAKE_CASE__ : int ): if number > 0: raise ValueError('''input must be a negative integer''' ) UpperCamelCase :Any = len(bin(SCREAMING_SNAKE_CASE__ )[3:] ) UpperCamelCase :Tuple = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:] UpperCamelCase :Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
658
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import table: maps submodule name -> public names it provides.
# Backend-specific entries are appended only when their framework exists.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors _import_structure.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
1
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Read JSON / JSON-Lines files into a `Dataset` (or an iterable dataset when streaming)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Optional top-level JSON field containing the records.
        self.field = field
        # Normalize to a {split: paths} mapping so the builder receives uniform data_files.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset: iterable when streaming, map-style otherwise."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        else:
            # Defaults: local files need no download config/mode/verification/base path.
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Serialize a `Dataset` to JSON (pandas `to_json` semantics), batch by batch."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Write the dataset to `self.path_or_buf`; returns the number of bytes written."""
        # pandas would try to open its own target; we manage the file object ourselves.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Serialize one batch of rows to JSON bytes (args packed for Pool.imap)."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Write batches to `file_obj`, sequentially or with a process pool; returns bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
658
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Stub whose `forward` lists the model inputs contiguously (no extra args)."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Stub whose `forward` interleaves a non-model argument between the inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a fresh temp location; returns the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
658
1
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Task lookup tables are keyed by the lower-cased task name.
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """Map-style GLUE dataset with on-disk feature caching (deprecated in favor of 🤗 Datasets)."""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
658
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np

from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    """Unit tests for `RealmRetriever` against a tiny in-memory block-record corpus."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # dtype=object keeps the variable-length byte strings intact.
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

            self.assertEqual(retriever.block_records[0], b"This is the first record")
658
1
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCAmelCase_ ( lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[Any] =BertTokenizer UpperCamelCase_ : List[Any] =BertTokenizerFast UpperCamelCase_ : Optional[Any] =True UpperCamelCase_ : List[Any] =True UpperCamelCase_ : Optional[Any] =filter_non_english def UpperCAmelCase ( self ) -> Union[str, Any]: super().setUp() UpperCamelCase :Tuple = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCamelCase :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Tuple = '''UNwant\u00E9d,running''' UpperCamelCase :int = '''unwanted, running''' return input_text, output_text def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :Union[str, Any] = self.tokenizer_class(self.vocab_file ) UpperCamelCase :str = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return UpperCamelCase :Dict = self.get_tokenizer() UpperCamelCase :int = 
self.get_rust_tokenizer() UpperCamelCase :Optional[Any] = '''UNwant\u00E9d,running''' UpperCamelCase :Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = self.get_rust_tokenizer() UpperCamelCase :str = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # With lower casing UpperCamelCase :Dict = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = '''UNwant\u00E9d,running''' UpperCamelCase :Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.get_rust_tokenizer() UpperCamelCase :Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :Tuple = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , 
['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :int = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :int = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase ( self ) -> int: UpperCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[str] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Tuple = BasicTokenizer() UpperCamelCase :List[str] = '''a\n\'ll !!to?\'d of, can\'t.''' UpperCamelCase :List[str] = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.'''] self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] UpperCamelCase :Dict = {} for i, token in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase :int = i UpperCamelCase :Any = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) 
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase ( self ) -> int: self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase ( self ) -> Optional[int]: self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Optional[int] = self.get_tokenizer() UpperCamelCase :Optional[Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :List[Any] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' ) UpperCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = 
tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase :str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' UpperCamelCase :int = tokenizer_r.encode_plus( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :List[Any] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , '''do_lower_case''' ) else False UpperCamelCase :Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Any = ['''的''', 
'''人''', '''有'''] UpperCamelCase :int = ''''''.join(SCREAMING_SNAKE_CASE_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase :int = True UpperCamelCase :Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = False UpperCamelCase :List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) # it is expected that only the first Chinese character is not preceded by "##". 
UpperCamelCase :Any = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ ) ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
658
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]: UpperCamelCase :int = parent UpperCamelCase :List[Any] = batch_size UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[int] = max_length UpperCamelCase :Union[str, Any] = num_mel_bins UpperCamelCase :Optional[int] = is_training UpperCamelCase :Dict = use_labels UpperCamelCase :Dict = hidden_size UpperCamelCase :Optional[int] = num_hidden_layers UpperCamelCase :str = num_attention_heads UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[str] = hidden_act UpperCamelCase :List[str] = 
hidden_dropout_prob UpperCamelCase :List[Any] = attention_probs_dropout_prob UpperCamelCase :str = type_sequence_label_size UpperCamelCase :List[Any] = initializer_range UpperCamelCase :Union[str, Any] = scope UpperCamelCase :List[Any] = frequency_stride UpperCamelCase :Tuple = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension UpperCamelCase :Optional[int] = num_patches + 2 def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCamelCase :Tuple = None if self.use_labels: UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :str = self.get_config() return config, input_values, labels def UpperCAmelCase ( self ) -> List[Any]: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = config_and_inputs UpperCamelCase :List[Any] = {'''input_values''': input_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCamelCase_ : Any =( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : Dict =False def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = ASTModelTester(self ) UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def UpperCAmelCase ( self ) -> str: pass def UpperCAmelCase ( self ) -> int: UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def UpperCAmelCase ( self ) -> Tuple: 
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase :Any = [*signature.parameters.keys()] UpperCamelCase :Optional[int] = ['''input_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Optional[int]: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _A ( ): UpperCamelCase :Any = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' ) UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Tuple: return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = self.default_feature_extractor UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.default_feature_extractor UpperCamelCase , UpperCamelCase :Dict = prepare_audio() UpperCamelCase :Dict = audio.squeeze().numpy() UpperCamelCase :int = 
feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase :List[Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
658
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : jnp.ndarray UpperCamelCase_ : jnp.ndarray class UpperCAmelCase_ ( nn.Module ): """simple docstring""" UpperCamelCase_ : int UpperCamelCase_ : Tuple[int] =(16, 32, 96, 256) UpperCamelCase_ : jnp.dtype =jnp.floataa def UpperCAmelCase ( self ) -> int: UpperCamelCase :Any = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) UpperCamelCase :Dict = [] for i in range(len(self.block_out_channels ) - 1 ): UpperCamelCase :str = self.block_out_channels[i] UpperCamelCase :Optional[Any] = self.block_out_channels[i + 1] UpperCamelCase :Optional[Any] = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = blocks UpperCamelCase :int = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase :str = self.conv_in(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = nn.silu(SCREAMING_SNAKE_CASE_ ) for block in self.blocks: UpperCamelCase :Dict = 
block(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = nn.silu(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = self.conv_out(SCREAMING_SNAKE_CASE_ ) return embedding @flax_register_to_config class UpperCAmelCase_ ( nn.Module, lowercase, lowercase ): """simple docstring""" UpperCamelCase_ : int =32 UpperCamelCase_ : int =4 UpperCamelCase_ : Tuple[str] =( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) UpperCamelCase_ : Union[bool, Tuple[bool]] =False UpperCamelCase_ : Tuple[int] =(320, 640, 1280, 1280) UpperCamelCase_ : int =2 UpperCamelCase_ : Union[int, Tuple[int]] =8 UpperCamelCase_ : Optional[Union[int, Tuple[int]]] =None UpperCamelCase_ : int =1280 UpperCamelCase_ : float =0.0 UpperCamelCase_ : bool =False UpperCamelCase_ : jnp.dtype =jnp.floataa UpperCamelCase_ : bool =True UpperCamelCase_ : int =0 UpperCamelCase_ : str ="rgb" UpperCamelCase_ : Tuple[int] =(16, 32, 96, 256) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> FrozenDict: # init input tensors UpperCamelCase :Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size) UpperCamelCase :Any = jnp.zeros(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa ) UpperCamelCase :Dict = jnp.ones((1,) , dtype=jnp.intaa ) UpperCamelCase :List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) UpperCamelCase :List[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8) UpperCamelCase :str = jnp.zeros(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa ) UpperCamelCase , UpperCamelCase :Optional[Any] = jax.random.split(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )["params"] def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Union[str, Any] = self.block_out_channels UpperCamelCase :List[str] = block_out_channels[0] * 4 # If `num_attention_heads` is not 
defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. UpperCamelCase :int = self.num_attention_heads or self.attention_head_dim # input UpperCamelCase :List[Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time UpperCamelCase :Optional[int] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) UpperCamelCase :str = FlaxTimestepEmbedding(SCREAMING_SNAKE_CASE_ , dtype=self.dtype ) UpperCamelCase :Any = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) UpperCamelCase :Tuple = self.only_cross_attention if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :List[str] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Optional[Any] = (num_attention_heads,) * len(self.down_block_types ) # down UpperCamelCase :List[Any] = [] UpperCamelCase :Dict = [] UpperCamelCase :List[str] = block_out_channels[0] UpperCamelCase :str = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ ) for i, down_block_type in enumerate(self.down_block_types ): UpperCamelCase 
:List[Any] = output_channel UpperCamelCase :str = block_out_channels[i] UpperCamelCase :List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": UpperCamelCase :Optional[Any] = FlaxCrossAttnDownBlockaD( in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: UpperCamelCase :str = FlaxDownBlockaD( in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(SCREAMING_SNAKE_CASE_ ) for _ in range(self.layers_per_block ): UpperCamelCase :Any = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ ) if not is_final_block: UpperCamelCase :Dict = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = down_blocks UpperCamelCase :Optional[Any] = controlnet_down_blocks # mid UpperCamelCase :Tuple = block_out_channels[-1] UpperCamelCase :int = FlaxUNetMidBlockaDCrossAttn( in_channels=SCREAMING_SNAKE_CASE_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) UpperCamelCase :Tuple = nn.Conv( SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , 
dtype=self.dtype , ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , ) -> Union[FlaxControlNetOutput, Tuple]: UpperCamelCase :Union[str, Any] = self.controlnet_conditioning_channel_order if channel_order == "bgr": UpperCamelCase :Any = jnp.flip(SCREAMING_SNAKE_CASE_ , axis=1 ) # 1. time if not isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray ): UpperCamelCase :int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray ) and len(timesteps.shape ) == 0: UpperCamelCase :Any = timesteps.astype(dtype=jnp.floataa ) UpperCamelCase :int = jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 0 ) UpperCamelCase :Optional[Any] = self.time_proj(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = self.time_embedding(SCREAMING_SNAKE_CASE_ ) # 2. pre-process UpperCamelCase :str = jnp.transpose(SCREAMING_SNAKE_CASE_ , (0, 2, 3, 1) ) UpperCamelCase :int = self.conv_in(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = jnp.transpose(SCREAMING_SNAKE_CASE_ , (0, 2, 3, 1) ) UpperCamelCase :Union[str, Any] = self.controlnet_cond_embedding(SCREAMING_SNAKE_CASE_ ) sample += controlnet_cond # 3. down UpperCamelCase :Dict = (sample,) for down_block in self.down_blocks: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase , UpperCamelCase :int = down_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=not train ) else: UpperCamelCase , UpperCamelCase :int = down_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=not train ) down_block_res_samples += res_samples # 4. mid UpperCamelCase :List[Any] = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=not train ) # 5. 
contronet blocks UpperCamelCase :List[Any] = () for down_block_res_sample, controlnet_block in zip(SCREAMING_SNAKE_CASE_ , self.controlnet_down_blocks ): UpperCamelCase :Optional[int] = controlnet_block(SCREAMING_SNAKE_CASE_ ) controlnet_down_block_res_samples += (down_block_res_sample,) UpperCamelCase :Dict = controlnet_down_block_res_samples UpperCamelCase :Optional[Any] = self.controlnet_mid_block(SCREAMING_SNAKE_CASE_ ) # 6. scaling UpperCamelCase :List[Any] = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=SCREAMING_SNAKE_CASE_ , mid_block_res_sample=SCREAMING_SNAKE_CASE_ )
658
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def preprocess(image, w, h):
    """Convert a PIL image (or list of them) to a normalized NCHW tensor in [-1, 1].

    Tensors are passed through unchanged; lists of tensors are concatenated
    along the batch dimension.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        image = 2.0 * image - 1.0  # [0, 1] -> [-1, 1]
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between `v0` and `v1` at fraction `t`.

    Falls back to plain linear interpolation when the vectors are nearly
    parallel (|dot| > DOT_THRESHOLD). Accepts torch tensors or numpy arrays;
    returns the same kind it was given.
    """
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # nearly parallel: lerp is numerically safer
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    """Squared great-circle distance between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    """Enable/disable gradient tracking for every parameter of `model`."""
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """CLIP-guided Stable Diffusion pipeline that mixes a content and a style
    image: latents, prompts, and CLIP image embeddings of the two images are
    slerp-interpolated, and denoising is optionally steered by a CLIP
    image-similarity loss (``cond_fn``).
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        # feature_extractor.size may be an int or a {"shortest_edge": int} dict
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        # text encoder and CLIP model are only used for inference/guidance
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention to reduce memory use."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        """Trim the scheduler timesteps for img2img-style partial denoising."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """VAE-encode `image`, scale, replicate to `batch_size` and add noise at `timestep`."""
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        """Caption `image` with the CoCa model (used when no prompt is given)."""
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        """Return L2-normalized CLIP image embeddings replicated to `batch_size`."""
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        """Adjust the noise prediction / latents with the gradient of a CLIP
        image-similarity loss against `original_image_embeddings_clip`.
        """
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        """Run the images-mixing generation. Returns a
        `StableDiffusionPipelineOutput` (or `(images, None)` when
        ``return_dict`` is False).
        """
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        # NOTE(review): the return value of .to() is discarded here, so this is
        # a no-op for CPU-resident timesteps — kept as-is from the original.
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
658
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Any ='vit_msn' def __init__( self , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-06 , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = hidden_size UpperCamelCase :int = num_hidden_layers UpperCamelCase :Optional[Any] = num_attention_heads UpperCamelCase :Tuple = intermediate_size UpperCamelCase :str = hidden_act UpperCamelCase :int = hidden_dropout_prob UpperCamelCase :Union[str, Any] = attention_probs_dropout_prob UpperCamelCase :int = initializer_range UpperCamelCase :int = layer_norm_eps UpperCamelCase :Optional[Any] = image_size UpperCamelCase :Tuple = patch_size UpperCamelCase :Optional[Any] = num_channels UpperCamelCase :Union[str, Any] = qkv_bias
658
from __future__ import annotations def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :list[list[int]] = [] UpperCamelCase :list[int] = [] UpperCamelCase :List[str] = 0 UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ ) create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return result def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ): if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum: return if sum(SCREAMING_SNAKE_CASE__ ) == max_sum: result.append(SCREAMING_SNAKE_CASE__ ) return for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ): create_state_space_tree( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , ) __snake_case = [3, 34, 4, 12, 5, 2] __snake_case = 9 __snake_case = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
658
1
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """M-CTC-T speech feature extractor.

    Extracts log-mel filter-bank (MFSC) features from raw waveforms,
    optionally applying per-utterance mean/variance normalization.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length  # in milliseconds
        self.win_length = win_length  # in milliseconds
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # window/stride in samples, derived from the millisecond settings
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract MFSC features for a single (unbatched) waveform.

        Returns an array of shape (num_frames, feature_size).
        """
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        # spectrogram returns (freq, time); transpose to (time, freq)
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature matrix over its valid frames."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # re-fill the padded tail after normalization shifted it
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Normalize a batch of feature matrices, using the attention mask
        (when given) to find each utterance's valid length."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize (and optionally pad/normalize) one or more raw waveforms.

        Raises:
            ValueError: if `sampling_rate` disagrees with the extractor's
                configured rate, or if multi-channel audio is passed.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        # always request the mask from `pad` — normalization below needs the
        # valid lengths when padding was applied
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
658
def _A ( SCREAMING_SNAKE_CASE__ : int ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise ValueError('''check_bouncy() accepts only integer arguments''' ) UpperCamelCase :int = str(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Optional[Any] = ''''''.join(sorted(SCREAMING_SNAKE_CASE__ ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def _A ( SCREAMING_SNAKE_CASE__ : float = 99 ): if not 0 < percent < 100: raise ValueError('''solution() only accepts values from 0 to 100''' ) UpperCamelCase :Tuple = 0 UpperCamelCase :str = 1 while True: if check_bouncy(SCREAMING_SNAKE_CASE__ ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
658
1
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
    >>> metric = datasets.load_metric(\"competition_math\")
    >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Accuracy metric for the Hendrycks MATH dataset.

    Fix: the module constants were all bound to a single recycled name while
    the decorator and metric info read ``_CITATION``/``_DESCRIPTION``/
    ``_KWARGS_DESCRIPTION``; the compute method also declared duplicate
    parameter names.  Constants and the ``datasets.Metric`` hook names
    (``_info``/``_compute``) are restored so the metric is loadable.
    """

    def _info(self):
        # Declare string prediction/reference features and point at the
        # upstream codebase used for LaTeX answer canonicalization.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return ``{'accuracy': ...}``: the fraction of predictions that are
        mathematically equivalent to their reference after canonicalization."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            # is_equiv canonicalizes LaTeX (e.g. "1/2" vs "\frac{1}{2}") first.
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
658
def hex_to_binary(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell out
    the binary representation (e.g. "AC" -> 10101100).

    Surrounding whitespace and a leading '-' sign are accepted.

    Raises:
        ValueError: if the input is empty/blank or not valid hexadecimal.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    # Bug fix: an input of "0" used to leave bin_str empty, so the final
    # int("") raised ValueError.  Zero must simply map to 0.
    if not bin_str:
        return 0
    return int(("-" + bin_str) if is_negative else bin_str)


# Backward-compatible alias for the previous (mangled) public name.
_A = hex_to_binary

if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
1
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor


# NOTE(review): throughout this script every assignment target has been
# rewritten to the literal name `UpperCamelCase` (and every parameter to
# `SCREAMING_SNAKE_CASE__`), while the right-hand sides and later reads still
# use the original names (`config`, `checkpoint_url`, `name`, `model`, ...).
# The script therefore cannot run as written.  The comments below describe
# the apparent intent -- confirm against the upstream Swin2SR converter.


def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
    # Build a Swin2SR config whose hyper-parameters (upscale factor, embed
    # dim, depths, upsampler variant, ...) are selected by substring-matching
    # the official checkpoint URL.  Presumably the branch bodies originally
    # assigned `config.<attr>` values -- TODO confirm which attributes.
    UpperCamelCase :List[str] = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        # classical super-resolution, x4 upscale
        UpperCamelCase :Dict = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        UpperCamelCase :Any = 4
        UpperCamelCase :str = 48
        UpperCamelCase :str = '''pixelshuffle_aux'''
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        # lightweight variant: smaller depths/embedding
        UpperCamelCase :Union[str, Any] = [6, 6, 6, 6]
        UpperCamelCase :Union[str, Any] = 60
        UpperCamelCase :Any = [6, 6, 6, 6]
        UpperCamelCase :int = '''pixelshuffledirect'''
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        UpperCamelCase :Union[str, Any] = 4
        UpperCamelCase :Optional[Any] = '''nearest+conv'''
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # JPEG artifact-reduction checkpoint: appears single-channel with
        # 126-px inputs and a 255.0 img_range -- TODO confirm.
        UpperCamelCase :str = 1
        UpperCamelCase :Tuple = 1
        UpperCamelCase :List[str] = 126
        UpperCamelCase :List[str] = 7
        UpperCamelCase :Optional[Any] = 2_55.0
        UpperCamelCase :Union[str, Any] = ''''''

    return config


def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ):
    # Map one original Swin2SR state-dict key onto the HF Transformers
    # naming scheme (patch embeddings, encoder stages, attention blocks,
    # upsampling heads).  Order of the replacements matters: e.g. "attn.proj"
    # must be handled before the broader "attn" substring.
    if "patch_embed.proj" in name and "layers" not in name:
        UpperCamelCase :List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        UpperCamelCase :List[str] = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
    if "layers" in name:
        UpperCamelCase :List[Any] = name.replace('''layers''' , '''encoder.stages''' )
    if "residual_group.blocks" in name:
        UpperCamelCase :List[str] = name.replace('''residual_group.blocks''' , '''layers''' )
    if "attn.proj" in name:
        UpperCamelCase :Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        UpperCamelCase :Any = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        UpperCamelCase :Any = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        UpperCamelCase :Any = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        UpperCamelCase :Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        UpperCamelCase :Dict = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "q_bias" in name:
        UpperCamelCase :Any = name.replace('''q_bias''' , '''query.bias''' )
    if "k_bias" in name:
        UpperCamelCase :Tuple = name.replace('''k_bias''' , '''key.bias''' )
    if "v_bias" in name:
        UpperCamelCase :Optional[int] = name.replace('''v_bias''' , '''value.bias''' )
    if "cpb_mlp" in name:
        UpperCamelCase :Optional[int] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
    if "patch_embed.proj" in name:
        UpperCamelCase :Any = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
    if name == "norm.weight":
        UpperCamelCase :Optional[int] = '''layernorm.weight'''
    if name == "norm.bias":
        UpperCamelCase :Optional[Any] = '''layernorm.bias'''
    if "conv_first" in name:
        UpperCamelCase :int = name.replace('''conv_first''' , '''first_convolution''' )

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            UpperCamelCase :Tuple = name.replace('''conv_last''' , '''final_convolution''' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                UpperCamelCase :Dict = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
            if "upsample.0" in name:
                UpperCamelCase :Union[str, Any] = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
            if "upsample.2" in name:
                UpperCamelCase :str = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
            UpperCamelCase :Tuple = '''upsample.''' + name
        elif config.upsampler == "pixelshuffledirect":
            UpperCamelCase :List[Any] = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
            UpperCamelCase :Dict = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
        else:
            pass
    else:
        # everything else lives under the `swin2sr.` model prefix
        UpperCamelCase :str = '''swin2sr.''' + name

    return name


def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
    # Rewrite a whole original state dict in place: pop each key, and split
    # fused qkv tensors into separate query/key/value slices of size
    # `config.embed_dim` along the first axis.
    for key in orig_state_dict.copy().keys():
        UpperCamelCase :Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )

        if "qkv" in key:
            # key looks like "layers.<i>....blocks.<j>....qkv.(weight|bias)";
            # indices 1 and 4 are presumably stage/block numbers -- TODO confirm.
            UpperCamelCase :int = key.split('''.''' )
            UpperCamelCase :Optional[int] = int(key_split[1] )
            UpperCamelCase :Optional[int] = int(key_split[4] )
            UpperCamelCase :Any = config.embed_dim

            if "weight" in key:
                # rows [0, dim) = query, [dim, 2*dim) = key, last dim = value
                UpperCamelCase :Tuple = val[:dim, :]
                UpperCamelCase :Union[str, Any] = val[dim : dim * 2, :]
                UpperCamelCase :Any = val[-dim:, :]
            else:
                UpperCamelCase :Dict = val[:dim]
                UpperCamelCase :Dict = val[dim : dim * 2]
                UpperCamelCase :Any = val[-dim:]
            pass
        else:
            UpperCamelCase :Optional[Any] = val

    return orig_state_dict


def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
    # End-to-end conversion: build the config, load + rename the original
    # checkpoint, sanity-check model outputs on a test image against
    # hard-coded expected slices, then optionally save / push to the Hub.
    UpperCamelCase :Optional[int] = get_config(SCREAMING_SNAKE_CASE__ )
    UpperCamelCase :Tuple = SwinaSRForImageSuperResolution(SCREAMING_SNAKE_CASE__ )
    model.eval()

    UpperCamelCase :int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
    UpperCamelCase :Tuple = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    UpperCamelCase , UpperCamelCase :Union[str, Any] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )

    # missing keys are fatal; unexpected keys are tolerated only for buffers
    # recomputed at init time (relative positions / attention masks)
    if len(SCREAMING_SNAKE_CASE__ ) > 0:
        raise ValueError('''Missing keys when converting: {}'''.format(SCREAMING_SNAKE_CASE__ ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F'''Unexpected key {key} in state_dict''' )

    # verify values
    UpperCamelCase :Optional[Any] = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
    UpperCamelCase :List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
    UpperCamelCase :str = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    # the JPEG checkpoint was trained at 126 px, all others at 256 px
    UpperCamelCase :List[str] = 126 if '''Jpeg''' in checkpoint_url else 256
    UpperCamelCase :Tuple = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ]
    )
    UpperCamelCase :Any = transforms(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )

    if config.num_channels == 1:
        # grayscale models: keep only the first channel
        UpperCamelCase :List[str] = pixel_values[:, 0, :, :].unsqueeze(1 )

    UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE__ )

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        UpperCamelCase :Any = torch.Size([1, 3, 512, 512] )
        UpperCamelCase :Dict = torch.tensor(
            [[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        UpperCamelCase :Tuple = torch.Size([1, 3, 1024, 1024] )
        UpperCamelCase :str = torch.tensor(
            [[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        UpperCamelCase :Dict = torch.Size([1, 3, 1024, 1024] )
        UpperCamelCase :Dict = torch.tensor(
            [[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        UpperCamelCase :Dict = torch.Size([1, 3, 512, 512] )
        UpperCamelCase :List[str] = torch.tensor(
            [[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        UpperCamelCase :Any = torch.Size([1, 3, 1024, 1024] )
        UpperCamelCase :Optional[Any] = torch.tensor(
            [[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
    print('''Looks ok!''' )

    # human-readable Hub repo name for each known checkpoint URL
    UpperCamelCase :Optional[int] = {
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
            '''swin2SR-classical-sr-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
            '''swin2SR-classical-sr-x4-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
            '''swin2SR-compressed-sr-x4-48'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
            '''swin2SR-lightweight-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
            '''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
        ),
    }
    UpperCamelCase :Any = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE__ )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(SCREAMING_SNAKE_CASE__ )

    if push_to_hub:
        model.push_to_hub(F'''caidas/{model_name}''' )
        processor.push_to_hub(F'''caidas/{model_name}''' )


if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
        type=str,
        help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")

    __snake_case = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
658
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from ``position`` that stay on an n x n board."""
    y, x = position
    # the eight L-shaped knight moves
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of the board has been visited (non-zero)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first backtracking step: try to extend the tour from ``pos``.

    ``curr`` is the move number already placed at ``pos``; squares holding 0
    are unvisited.  Mutates ``board`` in place, undoing failed placements.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns the board with squares numbered 1..n*n in visit order.

    Raises:
        ValueError: if no tour exists for the given board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    # try every starting square in turn
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    # typo fix: "Kight" -> "Knight"
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


# Backward-compatible alias for the previous (mangled) public name.
_A = open_knight_tour

if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
1
def greatest_common_divisor(x: int, y: int) -> int:
    """Return gcd(x, y) via Euclid's recursive algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Return the least common multiple, using lcm(x, y) = x*y / gcd(x, y)."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by every integer
    from 1 to ``n`` (Project Euler problem 5)."""
    g = 1
    for i in range(1, n + 1):
        # fold the running lcm over 1..n
        g = lcm(g, i)
    return g


# Backward-compatible alias for the previous (mangled) public name.
_A = solution

if __name__ == "__main__":
    print(f"{solution() = }")
658
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


# NOTE(review): local variables in this test file have been mangled to a
# single recycled identifier (`UpperCamelCase`) while later statements still
# read the original names (`config`, `loaded_config`, `generation_config`,
# ...), and every test method shares the name `UpperCAmelCase`, so only the
# last definition in each class survives.  As written the file cannot run;
# the comments describe the apparent intent -- confirm against the upstream
# transformers test suite.


class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    @parameterized.expand([(None,), ('''foo.json''',)] )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
        # save_pretrained/from_pretrained round trip, parameterized over the
        # default and a custom config file name.
        UpperCamelCase :int = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ ,
            temperature=0.7 ,
            length_penalty=1.0 ,
            bad_words_ids=[[1, 2, 3], [4, 5]] ,
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
            UpperCamelCase :str = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> str:
        # GenerationConfig.from_model_config should pick up generation-related
        # fields (e.g. eos_token_id) from a full model config.
        UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained('''gpt2''' )
        UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )

    def UpperCAmelCase ( self ) -> Dict:
        # GenerationConfig.update(): applies known attributes, returns the
        # unknown kwargs, and leaves the caller's dict untouched.
        UpperCamelCase :List[str] = GenerationConfig()
        UpperCamelCase :List[str] = {
            '''max_new_tokens''': 1024,
            '''foo''': '''bar''',
        }
        UpperCamelCase :Dict = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = generation_config.update(**SCREAMING_SNAKE_CASE_ )

        # update_kwargs was not modified (no side effects)
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} )

    def UpperCAmelCase ( self ) -> Optional[Any]:
        # Ad-hoc (non-schema) attributes survive a save/load round trip, but
        # are NOT inherited when building from a model config.
        UpperCamelCase :List[Any] = GenerationConfig()
        UpperCamelCase :Tuple = '''bar'''
        with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
            generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
            UpperCamelCase :Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo , '''bar''' )

        UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
        assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' )  # no new kwargs should be initialized if from config

    def UpperCAmelCase ( self ) -> Any:
        # Default values, explicit construction, and from_pretrained kwarg
        # overrides (temperature=1.0 overrides the saved 0.7).
        UpperCamelCase :Dict = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(default_config.num_beams , 1 )

        UpperCamelCase :Tuple = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ ,
            temperature=0.7 ,
            length_penalty=1.0 ,
            bad_words_ids=[[1, 2, 3], [4, 5]] ,
        )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(config.num_beams , 1 )

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(SCREAMING_SNAKE_CASE_ )
            UpperCamelCase :Tuple = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 )

        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value


@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    @classmethod
    def UpperCAmelCase ( cls ) -> Optional[Any]:
        # setUpClass: log the shared CI token into the HF folder.
        UpperCamelCase :List[str] = TOKEN
        HfFolder.save_token(SCREAMING_SNAKE_CASE_ )

    @classmethod
    def UpperCAmelCase ( cls ) -> Union[str, Any]:
        # tearDownClass: best-effort cleanup of the repos created below.
        try:
            delete_repo(token=cls._token , repo_id='''test-generation-config''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
        except HTTPError:
            pass

    def UpperCAmelCase ( self ) -> Any:
        # Push to the user namespace via push_to_hub, then via
        # save_pretrained(push_to_hub=True), verifying the round trip both times.
        UpperCamelCase :Optional[Any] = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ ,
            temperature=0.7 ,
            length_penalty=1.0 ,
        )
        config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )

        UpperCamelCase :List[Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-generation-config''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )

        UpperCamelCase :Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        # Same as above, but pushing into an organization namespace.
        UpperCamelCase :List[str] = GenerationConfig(
            do_sample=SCREAMING_SNAKE_CASE_ ,
            temperature=0.7 ,
            length_penalty=1.0 ,
        )
        config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )

        UpperCamelCase :Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )

        UpperCamelCase :Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
658
1
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


# NOTE(review): this reader has been name-mangled -- the base class reads as
# the undefined bare name `lowercase` (presumably `AbstractDatasetReader`,
# imported above), parameters are all the duplicate identifier
# `SCREAMING_SNAKE_CASE_` (a SyntaxError as written), and locals read later
# as `path_or_paths`/`dataset` are assigned to `UpperCamelCase`.  Documented
# as-is; confirm against the upstream `datasets` TextDatasetReader.
class UpperCAmelCase_ ( lowercase ):
    """simple docstring"""

    def __init__(
        self ,
        SCREAMING_SNAKE_CASE_ ,
        SCREAMING_SNAKE_CASE_ = None ,
        SCREAMING_SNAKE_CASE_ = None ,
        SCREAMING_SNAKE_CASE_ = None ,
        SCREAMING_SNAKE_CASE_ = False ,
        SCREAMING_SNAKE_CASE_ = False ,
        SCREAMING_SNAKE_CASE_ = None ,
        **SCREAMING_SNAKE_CASE_ ,
    ) -> Dict:
        # Forward path/split/features/caching/streaming options to the
        # abstract reader, then configure a packaged `Text` builder with the
        # (possibly per-split) data files.
        super().__init__(
            SCREAMING_SNAKE_CASE_ ,
            split=SCREAMING_SNAKE_CASE_ ,
            features=SCREAMING_SNAKE_CASE_ ,
            cache_dir=SCREAMING_SNAKE_CASE_ ,
            keep_in_memory=SCREAMING_SNAKE_CASE_ ,
            streaming=SCREAMING_SNAKE_CASE_ ,
            num_proc=SCREAMING_SNAKE_CASE_ ,
            **SCREAMING_SNAKE_CASE_ ,
        )
        # normalize a bare path into a {split: path} mapping
        UpperCamelCase :Optional[Any] = path_or_paths if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else {self.split: path_or_paths}
        UpperCamelCase :List[Any] = Text(
            cache_dir=SCREAMING_SNAKE_CASE_ ,
            data_files=SCREAMING_SNAKE_CASE_ ,
            features=SCREAMING_SNAKE_CASE_ ,
            **SCREAMING_SNAKE_CASE_ ,
        )

    def UpperCAmelCase ( self ) -> Dict:
        # Materialize the dataset: streaming builds an IterableDataset;
        # otherwise download/prepare then load the requested split.
        # Build iterable dataset
        if self.streaming:
            UpperCamelCase :Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            UpperCamelCase :Union[str, Any] = None
            UpperCamelCase :Optional[Any] = None
            UpperCamelCase :Optional[Any] = None
            UpperCamelCase :List[Any] = None
            self.builder.download_and_prepare(
                download_config=SCREAMING_SNAKE_CASE_ ,
                download_mode=SCREAMING_SNAKE_CASE_ ,
                verification_mode=SCREAMING_SNAKE_CASE_ ,
                base_path=SCREAMING_SNAKE_CASE_ ,
                num_proc=self.num_proc ,
            )
            UpperCamelCase :Tuple = self.builder.as_dataset(
                split=self.split , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory )
        return dataset
658
def hexagonal_numbers(length: int):
    """Return the first ``length`` hexagonal numbers, h_n = n * (2n - 1).

    Raises:
        ValueError: if ``length`` is not a positive integer.
    """
    # Bug fix: the isinstance check must come first -- previously a
    # non-numeric argument raised TypeError from `length <= 0` instead of
    # the documented ValueError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


# Backward-compatible alias for the previous (mangled) public name.
_A = hexagonal_numbers

if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
658
1
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION

if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# Delimiter inserted between sentences before reducing them to characters.
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Shim for jiwer < 2.3.0: turn a list of sentences into a flat list of characters."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences, never after the last one.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """Character error rate metric backed by jiwer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return the character error rate of `predictions` against `references`."""
        if concatenate_texts:
            # jiwer reports the character-level rate under the "wer" key because
            # the transforms above already reduced the text to characters.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total


# Backward-compat alias for the previous (corrupted) class name.
UpperCAmelCase_ = CER
658
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


# Mapping from a plain-English language name to its NLLB (Flores-200) language code.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}


class TranslationTool(PipelineTool):
    """NLLB-based translation tool.

    Bug fixes: the language map was bound to a name the class never read (the
    class attribute referenced `LANGUAGE_CODES`, which was undefined), all three
    pipeline methods shared one name so only the last survived, and `decode`
    passed its own argument where `skip_special_tokens` was expected.
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Tokenize `text`, validating and resolving both language names to NLLB codes."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''')
        src_code = self.lang_to_code[src_lang]
        tgt_code = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_code, tgt_lang=tgt_code
        )

    def forward(self, inputs):
        """Run seq2seq generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)


# Backward-compat aliases for the previous (corrupted) identifiers.
UpperCAmelCase_ = TranslationTool
__snake_case = LANGUAGE_CODES
658
1
# Content of the installation cell injected at the top of generated notebooks
# (Italian translation of the docs).
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells prepended to every generated notebook.
# Bug fix: this previously referenced INSTALL_CONTENT while all three constants
# were bound to the same name, so the reference was undefined and the first two
# bindings were clobbered.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder -> dummy-name substitutions applied to doc code samples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
658
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Scan array[left:right] sequentially; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; return an index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        # Small windows are cheaper to scan linearly than to keep splitting.
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # Target lies strictly between the two probe points.
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; return an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
658
1
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU (swish) activation: x * sigmoid(x).

    Bug fix: both functions were previously bound to the same name, so the call
    to `sigmoid` here was undefined at runtime.
    """
    return vector * sigmoid(vector)


# Backward-compat alias for the previous (corrupted) function name,
# bound to the last definition as in the original module.
_A = sigmoid_linear_unit


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
def naive_cut_rod_recursive(n: int, prices: list):
    """Solve rod-cutting by plain recursion (exponential time).

    :param n: rod length.
    :param prices: prices[i - 1] is the price of a piece of length i.
    :return: the maximum obtainable revenue.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Solve rod-cutting with memoized (top-down) dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for top_down_cut_rod; max_rev caches solved subproblems."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Solve rod-cutting with iterative (bottom-up) dynamic programming, O(n^2)."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate rod length and price list; raise ValueError on bad input."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    """Sanity-check that all three solvers agree on a known instance."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
658
1
def euclidean_gcd(a: int, b: int) -> int:
    """Compute the greatest common divisor with the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Compute the greatest common divisor with the recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    """Print a few sample GCD computations for both implementations."""
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
658
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for FocalNet models.

    Bug fix: the constructor previously assigned every value to the same
    throwaway local instead of instance attributes, so `self.depths` and
    `self.stage_names` (read below) were never set.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Backbone stage naming: "stem" plus one entry per depth.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


# Backward-compat aliases for the previous (corrupted) identifiers.
UpperCAmelCase_ = FocalNetConfig
__snake_case = FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP
658
1
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)

# NOTE(review): identifiers throughout this module appear machine-mangled —
# duplicate `SCREAMING_SNAKE_CASE__` / `SCREAMING_SNAKE_CASE_` parameters are a
# SyntaxError, all locals are assigned to `UpperCamelCase` but read back under
# their original names (`image`, `va`, `t_start`, ...), and every method below
# is named `UpperCAmelCase` (later defs shadow earlier ones at class creation).
# The original identifiers must be restored before this file can run; the
# comments below describe the apparent intent only and should be re-verified
# against the upstream diffusers community pipeline.


def _A(SCREAMING_SNAKE_CASE__: Tuple, SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: Optional[Any]):
    """Preprocess an image (tensor, PIL image, or list of either) into a
    float tensor in NCHW layout scaled to [-1, 1] — presumably resized to
    (w, h); TODO confirm the parameter order after de-mangling."""
    if isinstance(SCREAMING_SNAKE_CASE__, torch.Tensor):
        # Already a tensor: passed through unchanged.
        return image
    elif isinstance(SCREAMING_SNAKE_CASE__, PIL.Image.Image):
        UpperCamelCase: Dict = [image]
    if isinstance(image[0], PIL.Image.Image):
        # PIL path: Lanczos resize, stack to NHWC, 0..255 -> 0..1,
        # NHWC -> NCHW, then map linearly to [-1, 1].
        UpperCamelCase: Any = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
        UpperCamelCase: int = np.concatenate(SCREAMING_SNAKE_CASE__, axis=0)
        UpperCamelCase: Optional[Any] = np.array(SCREAMING_SNAKE_CASE__).astype(np.floataa) / 2_55.0
        UpperCamelCase: List[str] = image.transpose(0, 3, 1, 2)
        UpperCamelCase: Tuple = 2.0 * image - 1.0
        UpperCamelCase: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
    elif isinstance(image[0], torch.Tensor):
        UpperCamelCase: str = torch.cat(SCREAMING_SNAKE_CASE__, dim=0)
    return image


def _A(SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Any, SCREAMING_SNAKE_CASE__: Tuple, SCREAMING_SNAKE_CASE__: int = 0.99_95):
    """Spherical linear interpolation (slerp) between two vectors.

    Accepts torch tensors (round-trips through numpy, restoring the original
    device) or numpy arrays.  Falls back to plain linear interpolation when
    the vectors are nearly collinear (|dot| > DOT_THRESHOLD), where arccos is
    numerically unstable.
    """
    if not isinstance(SCREAMING_SNAKE_CASE__, np.ndarray):
        UpperCamelCase: int = True
        UpperCamelCase: Dict = va.device
        UpperCamelCase: List[Any] = va.cpu().numpy()
        UpperCamelCase: str = va.cpu().numpy()
    # Cosine of the angle between the (normalized) vectors.
    UpperCamelCase: Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__) * np.linalg.norm(SCREAMING_SNAKE_CASE__)))
    if np.abs(SCREAMING_SNAKE_CASE__) > DOT_THRESHOLD:
        # Nearly collinear: lerp is safer than slerp here.
        UpperCamelCase: Any = (1 - t) * va + t * va
    else:
        UpperCamelCase: Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__)
        UpperCamelCase: List[str] = np.sin(SCREAMING_SNAKE_CASE__)
        UpperCamelCase: Union[str, Any] = theta_a * t
        UpperCamelCase: str = np.sin(SCREAMING_SNAKE_CASE__)
        # Standard slerp weights: sin((1-t)θ)/sin(θ) and sin(tθ)/sin(θ).
        UpperCamelCase: Tuple = np.sin(theta_a - theta_t) / sin_theta_a
        UpperCamelCase: List[Any] = sin_theta_t / sin_theta_a
        UpperCamelCase: Union[str, Any] = sa * va + sa * va
    if inputs_are_torch:
        # Restore tensor type and original device for torch inputs.
        UpperCamelCase: Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
    return va


def _A(SCREAMING_SNAKE_CASE__: str, SCREAMING_SNAKE_CASE__: Tuple):
    """Squared great-circle distance between L2-normalized embeddings."""
    UpperCamelCase: int = F.normalize(SCREAMING_SNAKE_CASE__, dim=-1)
    UpperCamelCase: int = F.normalize(SCREAMING_SNAKE_CASE__, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def _A(SCREAMING_SNAKE_CASE__: int, SCREAMING_SNAKE_CASE__: Any):
    """Set ``requires_grad`` on every parameter of ``model`` to ``value``
    (used to freeze/unfreeze sub-models for the guidance gradient)."""
    for param in model.parameters():
        UpperCamelCase: Any = value


class UpperCAmelCase_(lowercase):
    """CLIP-guided image-mixing Stable Diffusion pipeline.

    Mixes a content image and a style image by slerping their VAE latents,
    text embeddings, and CLIP image embeddings, then denoises with optional
    classifier-free guidance and CLIP spherical-distance guidance.  An
    optional CoCa captioner generates prompts when none are supplied.

    NOTE(review): all methods below were renamed to ``UpperCAmelCase`` by the
    obfuscation, so only the last definition would survive class creation —
    the original method names (``enable_attention_slicing``,
    ``get_timesteps``, ``prepare_latents``, ``get_image_description``,
    ``get_clip_image_embeddings``, ``cond_fn``) must be restored.
    """

    def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, ) -> str:
        # Registers vae/text_encoder/clip_model/tokenizer/unet/scheduler/
        # feature_extractor plus the three optional CoCa components.
        super().__init__()
        self.register_modules(
            vae=SCREAMING_SNAKE_CASE_,
            text_encoder=SCREAMING_SNAKE_CASE_,
            clip_model=SCREAMING_SNAKE_CASE_,
            tokenizer=SCREAMING_SNAKE_CASE_,
            unet=SCREAMING_SNAKE_CASE_,
            scheduler=SCREAMING_SNAKE_CASE_,
            feature_extractor=SCREAMING_SNAKE_CASE_,
            coca_model=SCREAMING_SNAKE_CASE_,
            coca_tokenizer=SCREAMING_SNAKE_CASE_,
            coca_transform=SCREAMING_SNAKE_CASE_,
        )
        # feature_extractor.size may be an int or a dict with "shortest_edge".
        UpperCamelCase: Union[str, Any] = (
            feature_extractor.size
            if isinstance(feature_extractor.size, SCREAMING_SNAKE_CASE_)
            else feature_extractor.size['''shortest_edge''']
        )
        UpperCamelCase: Any = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        # Freeze the models that only provide guidance gradients.
        set_requires_grad(self.text_encoder, SCREAMING_SNAKE_CASE_)
        set_requires_grad(self.clip_model, SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_ = "auto") -> Tuple:
        """Enable sliced attention on the UNet to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCamelCase: Tuple = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> int:
        """Disable attention slicing (passes None through)."""
        self.enable_attention_slicing(SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> str:
        """Freeze the VAE."""
        set_requires_grad(self.vae, SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> Union[str, Any]:
        """Unfreeze the VAE."""
        set_requires_grad(self.vae, SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> int:
        """Freeze the UNet."""
        set_requires_grad(self.unet, SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> str:
        """Unfreeze the UNet."""
        set_requires_grad(self.unet, SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> Any:
        """Truncate the scheduler's timesteps for img2img-style denoising:
        keep only the last ``num_inference_steps * strength`` steps."""
        # get the original timestep using init_timestep
        UpperCamelCase: Union[str, Any] = min(int(num_inference_steps * strength), SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Optional[Any] = max(num_inference_steps - init_timestep, 0)
        UpperCamelCase: Optional[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None) -> int:
        """Encode an image to VAE latents, scale, duplicate per prompt, and
        add scheduler noise at the given timestep."""
        if not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''')
        UpperCamelCase: Tuple = image.to(device=SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_)
        if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
            # Per-sample generators: encode one image slice per generator so
            # results are reproducible per batch element.
            UpperCamelCase: int = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(SCREAMING_SNAKE_CASE_)
            ]
            UpperCamelCase: List[str] = torch.cat(SCREAMING_SNAKE_CASE_, dim=0)
        else:
            UpperCamelCase: Any = self.vae.encode(SCREAMING_SNAKE_CASE_).latent_dist.sample(SCREAMING_SNAKE_CASE_)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase: List[str] = 0.1_8215 * init_latents
        UpperCamelCase: Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0)
        UpperCamelCase: List[Any] = randn_tensor(init_latents.shape, generator=SCREAMING_SNAKE_CASE_, device=SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_)
        # get latents
        UpperCamelCase: Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        UpperCamelCase: str = init_latents
        return latents

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_) -> List[str]:
        """Caption an image with the CoCa model; strips the start/end-of-text
        markers and trailing punctuation from the decoded string."""
        UpperCamelCase: List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            UpperCamelCase: Any = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        UpperCamelCase: List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split('''<end_of_text>''')[0].replace('''<start_of_text>''', '''''').rstrip(''' .,''')

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> List[Any]:
        """Compute L2-normalized CLIP image embeddings, duplicated once per
        generation per prompt."""
        UpperCamelCase: str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0]).unsqueeze(0).to(self.device).half()
        UpperCamelCase: int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: str = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
        """CLIP guidance: differentiate the spherical distance between the
        decoded sample's CLIP embedding and the target embedding w.r.t. the
        latents, and fold the gradient back into the noise prediction."""
        UpperCamelCase: List[str] = latents.detach().requires_grad_()
        UpperCamelCase: List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        # predict the noise residual
        UpperCamelCase: List[Any] = self.unet(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            UpperCamelCase: List[str] = self.scheduler.alphas_cumprod[timestep]
            UpperCamelCase: Optional[int] = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            UpperCamelCase: List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            UpperCamelCase: int = torch.sqrt(SCREAMING_SNAKE_CASE_)
            UpperCamelCase: Tuple = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, SCREAMING_SNAKE_CASE_):
            # LMSDiscreteScheduler branch (presumably — TODO confirm after
            # the mangled isinstance argument is restored).
            UpperCamelCase: str = self.scheduler.sigmas[index]
            UpperCamelCase: Union[str, Any] = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''')
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase: int = 1 / 0.1_8215 * sample
        UpperCamelCase: List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_).sample
        UpperCamelCase: str = (image / 2 + 0.5).clamp(0, 1)
        UpperCamelCase: List[str] = transforms.Resize(self.feature_extractor_size)(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Any = self.normalize(SCREAMING_SNAKE_CASE_).to(latents.dtype)
        UpperCamelCase: List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: str = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_).mean() * clip_guidance_scale
        # Negative gradient: move latents so as to decrease the loss.
        UpperCamelCase: Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)[0]
        if isinstance(self.scheduler, SCREAMING_SNAKE_CASE_):
            UpperCamelCase: Dict = latents.detach() + grads * (sigma**2)
            UpperCamelCase: Optional[Any] = noise_pred_original
        else:
            UpperCamelCase: List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 512, SCREAMING_SNAKE_CASE_ = 512, SCREAMING_SNAKE_CASE_ = 0.6, SCREAMING_SNAKE_CASE_ = 50, SCREAMING_SNAKE_CASE_ = 7.5, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = 100, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "pil", SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 0.8, SCREAMING_SNAKE_CASE_ = 0.1, SCREAMING_SNAKE_CASE_ = 0.1, ) -> Dict:
        """Run the full mixing pipeline: validate inputs, obtain/caption
        prompts, slerp text/latent/CLIP embeddings of the content and style
        inputs, then run the guided denoising loop and decode the result."""
        if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) and len(SCREAMING_SNAKE_CASE_) != batch_size:
            raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''')
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
        if isinstance(SCREAMING_SNAKE_CASE_, torch.Generator) and batch_size > 1:
            # A single generator with batch_size > 1: only the first element
            # gets it; the rest use default RNG state.
            UpperCamelCase: Optional[int] = [generator] + [None] * (batch_size - 1)
        # Record which CoCa components are missing, for error messages below.
        UpperCamelCase: Tuple = [
            ('''model''', self.coca_model is None),
            ('''tokenizer''', self.coca_tokenizer is None),
            ('''transform''', self.coca_transform is None),
        ]
        UpperCamelCase: Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
        UpperCamelCase: Dict = ''', '''.join(SCREAMING_SNAKE_CASE_)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(SCREAMING_SNAKE_CASE_):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
            UpperCamelCase: Any = self.get_image_description(SCREAMING_SNAKE_CASE_)
        if style_prompt is None:
            if len(SCREAMING_SNAKE_CASE_):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
            UpperCamelCase: str = self.get_image_description(SCREAMING_SNAKE_CASE_)
        # get prompt text embeddings for content and style
        UpperCamelCase: List[Any] = self.tokenizer(
            SCREAMING_SNAKE_CASE_, padding='''max_length''', max_length=self.tokenizer.model_max_length, truncation=SCREAMING_SNAKE_CASE_, return_tensors='''pt''', )
        UpperCamelCase: Dict = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        UpperCamelCase: List[Any] = self.tokenizer(
            SCREAMING_SNAKE_CASE_, padding='''max_length''', max_length=self.tokenizer.model_max_length, truncation=SCREAMING_SNAKE_CASE_, return_tensors='''pt''', )
        UpperCamelCase: Tuple = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        # Interpolate content/style text embeddings.
        UpperCamelCase: Dict = slerp(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        # duplicate text embeddings for each generation per prompt
        UpperCamelCase: Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0)
        # set timesteps
        UpperCamelCase: str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        UpperCamelCase: List[str] = {}
        if accepts_offset:
            UpperCamelCase: Tuple = 1
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        UpperCamelCase, UpperCamelCase: Tuple = self.get_timesteps(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.device)
        UpperCamelCase: Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_)
        # Preprocess image
        UpperCamelCase: Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Optional[Any] = self.prepare_latents(
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, text_embeddings.dtype, self.device, SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Dict = preprocess(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Optional[Any] = self.prepare_latents(
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, text_embeddings.dtype, self.device, SCREAMING_SNAKE_CASE_)
        # Interpolate content/style latents.
        UpperCamelCase: str = slerp(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        if clip_guidance_scale > 0:
            UpperCamelCase: Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            UpperCamelCase: Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            UpperCamelCase: Optional[Any] = slerp(
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCamelCase: Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCamelCase: Any = content_text_input.input_ids.shape[-1]
            UpperCamelCase: Any = self.tokenizer([''''''], padding='''max_length''', max_length=SCREAMING_SNAKE_CASE_, return_tensors='''pt''')
            UpperCamelCase: Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            UpperCamelCase: Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase: str = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCamelCase: Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        UpperCamelCase: int = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                UpperCamelCase: List[str] = torch.randn(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, device='''cpu''', dtype=SCREAMING_SNAKE_CASE_).to(
                    self.device)
            else:
                UpperCamelCase: int = torch.randn(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, device=self.device, dtype=SCREAMING_SNAKE_CASE_)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            UpperCamelCase: str = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase: Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCamelCase: Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        UpperCamelCase: Dict = {}
        if accepts_eta:
            UpperCamelCase: int = eta
        # check if the scheduler accepts generator
        UpperCamelCase: Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            UpperCamelCase: List[str] = generator
        with self.progress_bar(total=SCREAMING_SNAKE_CASE_):
            for i, t in enumerate(SCREAMING_SNAKE_CASE_):
                # expand the latents if we are doing classifier free guidance
                UpperCamelCase: Optional[int] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                UpperCamelCase: List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
                # predict the noise residual
                UpperCamelCase: List[str] = self.unet(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    UpperCamelCase, UpperCamelCase: Any = noise_pred.chunk(2)
                    UpperCamelCase: Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    UpperCamelCase: int = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    UpperCamelCase, UpperCamelCase: str = self.cond_fn(
                        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, )
                # compute the previous noisy sample x_t -> x_t-1
                UpperCamelCase: List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase: List[Any] = 1 / 0.1_8215 * latents
        UpperCamelCase: Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_).sample
        UpperCamelCase: str = (image / 2 + 0.5).clamp(0, 1)
        UpperCamelCase: Dict = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            UpperCamelCase: List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_, nsfw_content_detected=SCREAMING_SNAKE_CASE_)
658
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor

# NOTE(review): identifiers in this file appear machine-mangled — every
# `__init__` parameter is `SCREAMING_SNAKE_CASE_` (duplicate parameter names
# are a SyntaxError), locals are assigned to `UpperCamelCase` but read back
# under original names, all three classes were renamed `UpperCAmelCase_`
# (later definitions shadow earlier ones, and `DPTModelTester` below refers to
# a name that no longer exists), most methods are `UpperCAmelCase`, and the
# `Union`/`Tuple`/... names used in evaluated annotations are never imported.
# Restore the original identifiers before running; comments describe intent.


class UpperCAmelCase_:
    """Builds a small hybrid-DPT configuration and random inputs/labels for
    the model tests below (originally ``DPTModelTester``)."""

    def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=[0, 1, 2, 3], SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=[1, 384, 24, 24], SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, ) -> int:
        UpperCamelCase: Union[str, Any] = parent
        UpperCamelCase: Tuple = batch_size
        UpperCamelCase: Optional[Any] = image_size
        UpperCamelCase: Any = patch_size
        UpperCamelCase: List[str] = num_channels
        UpperCamelCase: int = is_training
        UpperCamelCase: str = use_labels
        UpperCamelCase: Optional[Any] = hidden_size
        UpperCamelCase: int = num_hidden_layers
        UpperCamelCase: List[Any] = backbone_out_indices
        UpperCamelCase: str = num_attention_heads
        UpperCamelCase: Tuple = intermediate_size
        UpperCamelCase: Optional[int] = hidden_act
        UpperCamelCase: List[Any] = hidden_dropout_prob
        UpperCamelCase: List[str] = attention_probs_dropout_prob
        UpperCamelCase: Union[str, Any] = initializer_range
        UpperCamelCase: List[Any] = num_labels
        UpperCamelCase: int = backbone_featmap_shape
        UpperCamelCase: Any = scope
        UpperCamelCase: int = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        UpperCamelCase: Dict = (image_size // patch_size) ** 2
        UpperCamelCase: List[str] = num_patches + 1

    def UpperCAmelCase(self) -> Union[str, Any]:
        """Return (config, random pixel_values, optional segmentation labels)."""
        UpperCamelCase: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        UpperCamelCase: Tuple = None
        if self.use_labels:
            UpperCamelCase: Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        UpperCamelCase: Optional[int] = self.get_config()
        return config, pixel_values, labels

    def UpperCAmelCase(self) -> Dict:
        """Build a hybrid DPTConfig with a small BiT-style backbone config."""
        UpperCamelCase: Any = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [96, 192, 384, 768],
            '''num_groups''': 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=SCREAMING_SNAKE_CASE_,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=SCREAMING_SNAKE_CASE_,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> Tuple:
        """Forward DPTModel and check last_hidden_state shape."""
        UpperCamelCase: List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_)
        model.to(SCREAMING_SNAKE_CASE_)
        model.eval()
        UpperCamelCase: Dict = model(SCREAMING_SNAKE_CASE_)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> str:
        """Forward DPTForDepthEstimation and check predicted_depth shape."""
        UpperCamelCase: Optional[Any] = self.num_labels
        UpperCamelCase: Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_)
        model.to(SCREAMING_SNAKE_CASE_)
        model.eval()
        UpperCamelCase: Dict = model(SCREAMING_SNAKE_CASE_)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def UpperCAmelCase(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> Dict:
        """Forward DPTForSemanticSegmentation with labels; check logits shape."""
        UpperCamelCase: Optional[int] = self.num_labels
        UpperCamelCase: int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_)
        model.to(SCREAMING_SNAKE_CASE_)
        model.eval()
        UpperCamelCase: Dict = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def UpperCAmelCase(self) -> Dict:
        """Adapter for the common-test mixin: (config, {"pixel_values": ...})."""
        UpperCamelCase: Dict = self.prepare_config_and_inputs()
        UpperCamelCase, UpperCamelCase, UpperCamelCase: List[Any] = config_and_inputs
        UpperCamelCase: List[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_(lowercase, lowercase, unittest.TestCase):
    """Common model/pipeline test suite for the three DPT heads
    (originally ``DPTModelTest``)."""

    # all_model_classes / pipeline_model_mapping / flags — names were mangled
    # to `UpperCamelCase_` so later assignments shadow earlier ones as written.
    UpperCamelCase_: Tuple = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    UpperCamelCase_: Tuple = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase_: Tuple = False
    UpperCamelCase_: List[Any] = False
    UpperCamelCase_: Tuple = False

    def UpperCAmelCase(self) -> Dict:
        # setUp: model tester + config tester (DPT config has no text modality).
        UpperCamelCase: Union[str, Any] = DPTModelTester(self)
        UpperCamelCase: List[Any] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37)

    def UpperCAmelCase(self) -> Tuple:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DPT does not use inputs_embeds''')
    def UpperCAmelCase(self) -> int:
        pass

    def UpperCAmelCase(self) -> List[Any]:
        # Input embeddings must be a module; output embeddings linear or None.
        UpperCamelCase, UpperCamelCase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase: int = model_class(SCREAMING_SNAKE_CASE_)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            UpperCamelCase: int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_, nn.Linear))

    def UpperCAmelCase(self) -> Optional[int]:
        # forward() must take pixel_values as its first argument.
        UpperCamelCase, UpperCamelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase: List[Any] = model_class(SCREAMING_SNAKE_CASE_)
            UpperCamelCase: Optional[int] = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase: Optional[int] = [*signature.parameters.keys()]
            UpperCamelCase: Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> Optional[int]:
        UpperCamelCase: Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> Optional[int]:
        UpperCamelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> Any:
        UpperCamelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> List[str]:
        # Training smoke test: loss.backward() must succeed for trainable heads.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            UpperCamelCase, UpperCamelCase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
            UpperCamelCase: Any = True
            if model_class in get_values(SCREAMING_SNAKE_CASE_):
                continue
            UpperCamelCase: Dict = model_class(SCREAMING_SNAKE_CASE_)
            model.to(SCREAMING_SNAKE_CASE_)
            model.train()
            UpperCamelCase: str = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_)
            UpperCamelCase: Optional[Any] = model(**SCREAMING_SNAKE_CASE_).loss
            loss.backward()

    def UpperCAmelCase(self) -> Tuple:
        # Same as above but with gradient checkpointing enabled.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            UpperCamelCase, UpperCamelCase: Any = self.model_tester.prepare_config_and_inputs_for_common()
            UpperCamelCase: Optional[Any] = False
            UpperCamelCase: List[Any] = True
            if model_class in get_values(SCREAMING_SNAKE_CASE_) or not model_class.supports_gradient_checkpointing:
                continue
            UpperCamelCase: Optional[Any] = model_class(SCREAMING_SNAKE_CASE_)
            model.to(SCREAMING_SNAKE_CASE_)
            model.gradient_checkpointing_enable()
            model.train()
            UpperCamelCase: Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_)
            UpperCamelCase: List[str] = model(**SCREAMING_SNAKE_CASE_).loss
            loss.backward()

    def UpperCAmelCase(self) -> List[str]:
        # With zeroed-init config, all trainable params (except the hybrid
        # backbone's own) must initialize to exactly 0.0 or 1.0.
        UpperCamelCase, UpperCamelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase: Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_)
        for model_class in self.all_model_classes:
            UpperCamelCase: Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_)
            # Skip the check for the backbone
            UpperCamelCase: Optional[int] = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    UpperCamelCase: Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def UpperCAmelCase(self) -> Any:
        pass

    @slow
    def UpperCAmelCase(self) -> Optional[Any]:
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            UpperCamelCase: Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase(self) -> Optional[int]:
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        UpperCamelCase, UpperCamelCase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase: Union[str, Any] = '''add'''
        with self.assertRaises(SCREAMING_SNAKE_CASE_):
            UpperCamelCase: Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_)


def _A():
    """Load the standard COCO cats test fixture (originally ``prepare_img``)."""
    UpperCamelCase: Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
@slow
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration test against the published Intel/dpt-hybrid-midas
    checkpoint (originally ``DPTModelIntegrationTest``)."""

    def UpperCAmelCase(self) -> Optional[Any]:
        UpperCamelCase: List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
        UpperCamelCase: List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Tuple = prepare_img()
        UpperCamelCase: List[str] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='''pt''').to(SCREAMING_SNAKE_CASE_)
        # forward pass
        with torch.no_grad():
            UpperCamelCase: int = model(**SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Union[str, Any] = outputs.predicted_depth
        # verify the predicted depth
        UpperCamelCase: int = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, SCREAMING_SNAKE_CASE_)
        UpperCamelCase: int = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(SCREAMING_SNAKE_CASE_)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, SCREAMING_SNAKE_CASE_, atol=1e-4))
658
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXt-V2 model.

    Stores the hyper-parameters that define the model architecture. Defaults
    follow the facebook/convnextv2-tiny-1k-224 checkpoint.

    Fix vs. previous revision: all twelve ``__init__`` parameters were named
    identically (a SyntaxError — duplicate argument names) while the body
    referenced the real names; the real parameter names are restored.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """
        Args:
            num_channels: number of input image channels.
            patch_size: size of the patchify stem convolution.
            num_stages: number of stages in the encoder.
            hidden_sizes: channel dimension per stage; defaults to the
                "tiny" layout [96, 192, 384, 768] when None.
            depths: number of blocks per stage; defaults to [3, 3, 9, 3].
            hidden_act: activation function name.
            initializer_range: std-dev of the weight initializer.
            layer_norm_eps: epsilon used by the layer-norm layers.
            drop_path_rate: stochastic-depth rate.
            image_size: expected input resolution.
            out_features / out_indices: which stages a backbone exposes;
                aligned/validated by the shared backbone helper.
        """
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Stage names must exist before aligning the backbone output spec.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
658
"""Convert Audio Spectrogram Transformer (AST) checkpoints to the 🤗 format.

Fix vs. previous revision: every function was named ``_A`` and every local
was named ``UpperCamelCase``, while call sites and f-strings referenced the
real identifiers (``get_audio_spectrogram_transformer_config``,
``remove_keys``, ``parser``, ``args``, ...), so the script raised NameError
on every code path. Consistent names are restored throughout.
"""
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """Build an ASTConfig whose strides/labels match the named checkpoint."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # defaults already use stride 10
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map an original AST state-dict key to the 🤗 module naming scheme."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys in-place; split fused qkv tensors into q/k/v."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # NOTE(review): target key layout assumed to follow the ViT-style
            # "attention.attention.{query,key,value}" module path — confirm
            # against the AST modeling file.
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop the original classification heads that have no 🤗 counterpart."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download, convert and verify an AST checkpoint, then optionally save/push it."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
658
1
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1), starting at n = 0.

    Fix vs. previous revision: the function was defined as `_A` while the
    `__main__` block called `hexagonal_numbers`, raising NameError. The
    type check now runs before the comparison so non-int inputs raise
    ValueError instead of a TypeError from `<=`.

    :param length: how many terms to produce; must be a positive integer.
    :raises ValueError: if `length` is not a positive int.
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
658
"""Lazy import structure for the `llama` model sub-package.

Fix vs. previous revision: the import table was assigned to a throwaway
name (`__snake_case`) and each optional-dependency branch *replaced* it
instead of adding a sub-module entry, so `_import_structure` — referenced
at the bottom — was never defined (NameError) and the lazy module exposed
nothing. The canonical `_import_structure` dict updates are restored.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # slow tokenizer needs sentencepiece
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # fast tokenizer needs the `tokenizers` library
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # modeling code needs torch
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
1
"""Convert a YOSO research checkpoint into the 🤗 `YosoForMaskedLM` format.

Fix vs. previous revision: all three functions were named ``_A`` and all
locals ``UpperCamelCase``, while the call sites referenced the real names
(`convert_checkpoint_helper`, `convert_yoso_checkpoint`, `orig_state_dict`,
...), so the script raised NameError everywhere. Consistent names restored.
"""
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def convert_key(orig_key):
    """Translate one original YOSO state-dict key to the 🤗 naming scheme.

    The checks run in order: the more specific patterns (norm1/norm2,
    mha.attn, ff1/ff2, mlm_class) must be rewritten before the generic
    ones (norm, mha, ff, mlm) can safely fire.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every key, drop pooler/classifier heads, and add derived tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue  # heads that have no counterpart in YosoForMaskedLM
        else:
            orig_state_dict[convert_key(key)] = val

    # The decoder bias is shared, and position ids are offset by the padding idx.
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the research checkpoint, convert it, and save a 🤗 model at `pytorch_dump_path`."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
658
"""Accuracy metric for the MATH (competition mathematics) dataset.

Fix vs. previous revision: the two `datasets.Metric` override hooks were
renamed away from `_info`/`_compute` (so `datasets` would never call them),
and `_compute` referenced an undefined `n_correct`. Both are restored.
"""
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets


_CITATION = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy over canonicalized MATH answers."""

    def _info(self):
        # Declares the metric's schema and provenance for `datasets`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return {'accuracy': fraction of predictions equivalent to their reference}."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            # is_equiv canonicalizes both strings before comparing.
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
658
1
"""Treap (randomized balanced BST): split/merge based implementation.

Fix vs. previous revision: every tuple-unpacking target had been collapsed
to a single throwaway name, so `split`, `merge`, `insert` and `erase` all
referenced undefined variables (NameError), and the node class had been
renamed away from `Node` while its own annotations still said `Node | None`.
"""
from __future__ import annotations

from random import random


class Node:
    """Treap node: BST-ordered by `value`, heap-ordered by random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()  # heap priority; randomness balances the tree
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split `root` into (keys <= value, keys > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root (and its right subtree) belong to the right half.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root (and its left subtree) belong to the left half.
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every key in `left` must be <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes equal to `value` and return the new root."""
    left, right = split(root, value - 1)  # left: keys < value
    _, right = split(right, value)  # drop keys == value
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the keys in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: `+N` inserts N, `-N` erases N."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive REPL over a treap; 'q' quits."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
658
"""Smoke tests for the Flax example training scripts.

Fix vs. previous revision: module constants and all test locals had been
collapsed to throwaway names, so the command-line f-strings referenced
undefined `tmp_dir`/`epochs`, `patch.object` was not given `sys`, and the
`run_t5_mlm_flax` module was imported under a misspelled name. Restored.
"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the `-f` command-line flag (pytest passes the file)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load `{split}_results.json` written by an example script into a dict."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
658
1
"""Open knight's tour via backtracking.

Fix vs. previous revision: the four functions were all named `_A` while the
recursive call and the driver referenced `open_knight_tour_helper` /
`get_valid_pos` / `is_complete` (NameError); names are restored.
"""
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """True when every square has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Try to extend a partial tour from `pos` (step number `curr`); backtracks on failure."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move before trying the next one

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board; each cell holds its visit order.

    :raises ValueError: if no tour exists for this board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Kight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
from __future__ import annotations from collections.abc import Callable def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ): UpperCamelCase :Optional[Any] = x_start UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Optional[int] = 0.0 for _ in range(SCREAMING_SNAKE_CASE__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCamelCase :Any = (x_end - x_start) / steps + xa UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCamelCase :Optional[int] = xa UpperCamelCase :List[str] = fxa return area if __name__ == "__main__": def _A ( SCREAMING_SNAKE_CASE__ : int ): return x**3 + x**2 print("""f(x) = x^3 + x^2""") print("""The area between the curve, x = -5, x = 5 and the x axis is:""") __snake_case = 10 while i <= 10_00_00: print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
658
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# NOTE(review): every module constant below is bound to the same name, so each
# assignment shadows the previous one, and the class body reads names
# (VOCAB_FILES_NAMES, logger, ...) never defined here. This looks like
# mechanical identifier-renaming damage — verify against the original module.
__snake_case = logging.get_logger(__name__)

# On-disk vocabulary artifact: a single SentencePiece model file.
__snake_case = {"""vocab_file""": """sentencepiece.model"""}

# Checkpoint name -> downloadable SentencePiece model URL.
__snake_case = {
    """vocab_file""": {
        """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
    },
}

# Checkpoint name -> positional-embedding size.
__snake_case = {
    """google/rembert""": 2_56,
}


class UpperCAmelCase_ ( lowercase ):
    """SentencePiece-backed tokenizer (checkpoint map above points at RemBERT).

    Wraps an ``spm.SentencePieceProcessor`` loaded from ``vocab_file`` and
    builds ``[CLS] A [SEP]`` / ``[CLS] A [SEP] B [SEP]`` style inputs.
    """

    # Class-level configuration consumed by the PreTrainedTokenizer machinery.
    UpperCamelCase_ : Dict = VOCAB_FILES_NAMES
    UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    # NOTE(review): all parameters share one mangled name — duplicate argument
    # names are invalid Python. The originals were presumably vocab_file,
    # do_lower_case, remove_space, keep_accents and the special-token strings.
    def __init__(
        self,
        SCREAMING_SNAKE_CASE_,
        SCREAMING_SNAKE_CASE_=False,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_=True,
        SCREAMING_SNAKE_CASE_="[CLS]",
        SCREAMING_SNAKE_CASE_="[SEP]",
        SCREAMING_SNAKE_CASE_="[UNK]",
        SCREAMING_SNAKE_CASE_="[SEP]",
        SCREAMING_SNAKE_CASE_="[PAD]",
        SCREAMING_SNAKE_CASE_="[CLS]",
        SCREAMING_SNAKE_CASE_="[MASK]",
        **SCREAMING_SNAKE_CASE_,
    ) -> List[Any]:
        # Forward normalization options and special tokens to the base class.
        super().__init__(
            do_lower_case=SCREAMING_SNAKE_CASE_,
            remove_space=SCREAMING_SNAKE_CASE_,
            keep_accents=SCREAMING_SNAKE_CASE_,
            bos_token=SCREAMING_SNAKE_CASE_,
            eos_token=SCREAMING_SNAKE_CASE_,
            unk_token=SCREAMING_SNAKE_CASE_,
            sep_token=SCREAMING_SNAKE_CASE_,
            pad_token=SCREAMING_SNAKE_CASE_,
            cls_token=SCREAMING_SNAKE_CASE_,
            mask_token=SCREAMING_SNAKE_CASE_,
            **SCREAMING_SNAKE_CASE_,
        )
        # Record options, then load the SentencePiece model (targets mangled;
        # later methods read self.sp_model / self.vocab_file).
        UpperCamelCase: List[str] = do_lower_case
        UpperCamelCase: Any = remove_space
        UpperCamelCase: Dict = keep_accents
        UpperCamelCase: int = vocab_file
        UpperCamelCase: List[Any] = spm.SentencePieceProcessor()
        self.sp_model.Load(SCREAMING_SNAKE_CASE_)

    @property
    def UpperCAmelCase ( self ) -> List[Any]:
        # Number of pieces in the loaded SentencePiece model.
        return len(self.sp_model)

    def UpperCAmelCase ( self ) -> List[Any]:
        # Build token -> id map for the whole SentencePiece vocabulary, then
        # merge user-added tokens. NOTE(review): the dict target and the loop
        # variable are mangled (`vocab` / the id passed to convert_ids_to_tokens).
        UpperCamelCase: Optional[Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__( self ) -> Any:
        # Drop the (unpicklable) SentencePiece processor before pickling;
        # __setstate__ reloads it from self.vocab_file.
        UpperCamelCase: Optional[int] = self.__dict__.copy()
        UpperCamelCase: List[Any] = None
        return state

    def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        # Restore attributes, then re-create the SentencePiece processor.
        UpperCamelCase: str = d
        UpperCamelCase: Optional[Any] = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
        # Tokenize raw text into SentencePiece sub-word pieces.
        UpperCamelCase: Optional[Any] = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE_)
        return pieces

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        # Piece (string) -> vocabulary id.
        return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
        # Vocabulary id -> piece (string).
        return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        # Join pieces back into a plain string via SentencePiece detokenization.
        UpperCamelCase: Optional[Any] = self.sp_model.decode_pieces(SCREAMING_SNAKE_CASE_)
        return out_string

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
        UpperCamelCase: int = [self.sep_token_id]
        UpperCamelCase: Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
        # Mask marking special tokens (1) vs sequence tokens (0).
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_)) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_)) + [1]

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # Segment ids: 0 for [CLS] A [SEP], 1 for the optional B [SEP] part.
        UpperCamelCase: Any = [self.sep_token_id]
        UpperCamelCase: int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def UpperCAmelCase ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
        # Copy the SentencePiece model file into the given directory.
        if not os.path.isdir(SCREAMING_SNAKE_CASE_):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE_))
            return
        UpperCamelCase: Tuple = os.path.join(
            SCREAMING_SNAKE_CASE_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        # Only copy when the destination differs from the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE_):
            copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_)
        return (out_vocab_file,)
658
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase_ ( lowercase ):
    """Test-suite for ``CMStochasticIterativeScheduler`` (consistency models).

    NOTE(review): assignment targets are mangled (``UpperCamelCase :...``)
    while later statements read the original local names (scheduler, sample,
    model, ...) — evident identifier-renaming damage; intent documented below.
    """

    # Scheduler class(es) under test and default number of inference steps,
    # consumed by the SchedulerCommonTest helpers.
    UpperCamelCase_ : Optional[int] = (CMStochasticIterativeScheduler,)
    UpperCamelCase_ : Any = 10

    def UpperCAmelCase ( self, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        # Baseline scheduler config; keyword overrides are merged on top.
        UpperCamelCase: str = {
            '''num_train_timesteps''': 201,
            '''sigma_min''': 0.002,
            '''sigma_max''': 80.0,
        }
        config.update(**SCREAMING_SNAKE_CASE_)
        return config

    def UpperCAmelCase ( self ) -> str:
        # Two consecutive step() calls must preserve the sample's shape.
        UpperCamelCase: Optional[Any] = 10
        UpperCamelCase: Optional[Any] = self.get_scheduler_config()
        UpperCamelCase: Dict = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_)
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Any = scheduler.timesteps[0]
        UpperCamelCase: Union[str, Any] = scheduler.timesteps[1]
        UpperCamelCase: str = self.dummy_sample
        UpperCamelCase: List[str] = 0.1 * sample
        UpperCamelCase: List[str] = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_).prev_sample
        UpperCamelCase: str = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_).prev_sample
        self.assertEqual(output_a.shape, sample.shape)
        self.assertEqual(output_a.shape, output_a.shape)

    def UpperCAmelCase ( self ) -> List[str]:
        # Config sweep over the training-timestep count.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        # Config sweep over the clip_denoised flag.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase ( self ) -> List[Any]:
        # Full denoising loop with the default timestep schedule; the final
        # sums/means are pinned against golden values.
        UpperCamelCase: List[Any] = self.scheduler_classes[0]
        UpperCamelCase: List[Any] = self.get_scheduler_config()
        UpperCamelCase: Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Dict = 1
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Any = scheduler.timesteps
        UpperCamelCase: Union[str, Any] = torch.manual_seed(0)
        UpperCamelCase: Union[str, Any] = self.dummy_model()
        UpperCamelCase: List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(SCREAMING_SNAKE_CASE_):
            # 1. scale model input
            UpperCamelCase: List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            # 2. predict noise residual
            UpperCamelCase: Optional[int] = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            # 3. predict previous sample x_t-1
            UpperCamelCase: List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_).prev_sample
            UpperCamelCase: Tuple = pred_prev_sample
        UpperCamelCase: Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
        UpperCamelCase: Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def UpperCAmelCase ( self ) -> str:
        # Full loop with an explicit custom timestep schedule [106, 0];
        # golden values differ from the default-schedule test above.
        UpperCamelCase: Dict = self.scheduler_classes[0]
        UpperCamelCase: Optional[Any] = self.get_scheduler_config()
        UpperCamelCase: Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
        UpperCamelCase: List[str] = [106, 0]
        scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
        UpperCamelCase: List[str] = scheduler.timesteps
        UpperCamelCase: int = torch.manual_seed(0)
        UpperCamelCase: str = self.dummy_model()
        UpperCamelCase: List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            UpperCamelCase: List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            # 2. predict noise residual
            UpperCamelCase: int = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            # 3. predict previous sample x_t-1
            UpperCamelCase: Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_).prev_sample
            UpperCamelCase: int = pred_prev_sample
        UpperCamelCase: Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
        UpperCamelCase: int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def UpperCAmelCase ( self ) -> Optional[Any]:
        # Non-descending custom timesteps must be rejected.
        UpperCamelCase: List[str] = self.scheduler_classes[0]
        UpperCamelCase: Tuple = self.get_scheduler_config()
        UpperCamelCase: List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_)
        UpperCamelCase: Any = [39, 30, 12, 15, 0]
        with self.assertRaises(SCREAMING_SNAKE_CASE_, msg='''`timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase ( self ) -> str:
        # Passing both num_inference_steps and explicit timesteps is an error.
        UpperCamelCase: List[str] = self.scheduler_classes[0]
        UpperCamelCase: List[Any] = self.get_scheduler_config()
        UpperCamelCase: Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_)
        UpperCamelCase: int = [39, 30, 12, 1, 0]
        UpperCamelCase: Optional[Any] = len(SCREAMING_SNAKE_CASE_)
        with self.assertRaises(SCREAMING_SNAKE_CASE_, msg='''Can only pass one of `num_inference_steps` or `timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_, timesteps=SCREAMING_SNAKE_CASE_)

    def UpperCAmelCase ( self ) -> str:
        # Timesteps at/above num_train_timesteps must be rejected.
        # NOTE(review): the msg string below is missing an f-prefix and has an
        # extra closing brace — kept byte-identical (runtime string).
        UpperCamelCase: Optional[int] = self.scheduler_classes[0]
        UpperCamelCase: List[str] = self.get_scheduler_config()
        UpperCamelCase: Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
        UpperCamelCase: List[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            SCREAMING_SNAKE_CASE_, msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''',
        ):
            scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
658
1
# Directed graph given as adjacency lists, plus the full vertex list.
# Fix: both module constants were bound to one throwaway name, and the
# function was renamed away from `topological_sort`, which the recursive
# calls and the __main__ demo still reference.
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Post-order DFS over the module-level graph, starting at *start*.

    Appends each vertex after all of its out-neighbors (reading the result in
    reverse gives a topological order), and restarts from any vertex not yet
    reached so every vertex appears exactly once.

    :param start: vertex to visit first.
    :param visited: accumulator of visited vertices (mutated in place).
    :param sort: accumulator of the ordering built so far.
    :return: the completed ordering.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
658
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map of submodule name -> public symbols, consumed by _LazyModule below.
# Fix: this dict (and the optional additions) were bound to a throwaway name,
# so the `_import_structure` referenced at the bottom was never defined.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
1
"""TensorRT-accelerated SQuAD evaluation: build a TRT engine from an ONNX
BERT model, run inference over a validation set and report SQuAD metrics.

NOTE(review): this flat script has been through a mechanical identifier
rewrite — most assignment targets are `__snake_case` / `UpperCamelCase` while
later code still reads the original names (args, tokenizer, parser, engine,
...). As written it cannot run; the comments below document evident intent.
"""
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate

# Logger / CLI-parser setup (targets mangled; later code reads TRT_LOGGER,
# absl_logger, logger, parser).
__snake_case = trt.Logger(trt.Logger.WARNING)
__snake_case = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__snake_case = logging.getLogger(__name__)
__snake_case = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    """--onnx_model_path""",
    default=None,
    type=str,
    required=True,
    help="""Path to ONNX model: """,
)
parser.add_argument(
    """--output_dir""",
    default=None,
    type=str,
    required=True,
    help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
    """--tokenizer_name""",
    default="""""",
    type=str,
    required=True,
    help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
    """--version_2_with_negative""",
    action="""store_true""",
    help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
    """--null_score_diff_threshold""",
    type=float,
    default=0.0,
    help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
    """--max_seq_length""",
    default=3_84,
    type=int,
    help=(
        """The maximum total input sequence length after WordPiece tokenization. Sequences """
        """longer than this will be truncated, and sequences shorter than this will be padded."""
    ),
)
parser.add_argument(
    """--doc_stride""",
    default=1_28,
    type=int,
    help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
    """--n_best_size""",
    default=20,
    type=int,
    help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
    """--max_answer_length""",
    default=30,
    type=int,
    help=(
        """The maximum length of an answer that can be generated. This is needed because the start """
        """and end predictions are not conditioned on one another."""
    ),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
    """--dataset_name""",
    type=str,
    default=None,
    required=True,
    help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--dataset_config_name""",
    type=str,
    default=None,
    help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
    """--fp16""",
    action="""store_true""",
    help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
    """--int8""",
    action="""store_true""",
    help="""Whether to use INT8""",
)
__snake_case = parser.parse_args()

# A tokenizer name is mandatory; training a new one is out of scope here.
if args.tokenizer_name:
    __snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
        """You can do it from another script, save it, and load it from here, using --tokenizer_name."""
    )

logger.info("""Training/evaluation parameters %s""", args)

__snake_case = args.per_device_eval_batch_size
__snake_case = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
__snake_case = True
__snake_case = """temp_engine/bert-fp32.engine"""
if args.fpaa:
    __snake_case = """temp_engine/bert-fp16.engine"""
if args.inta:
    __snake_case = """temp_engine/bert-int8.engine"""

# import ONNX file
if not os.path.exists("""temp_engine"""):
    os.makedirs("""temp_engine""")

__snake_case = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    # Parse the ONNX graph; dump parser errors if parsing fails.
    with open(args.onnx_model_path, """rb""") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    __snake_case = [network.get_input(i) for i in range(network.num_inputs)]
    __snake_case = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        __snake_case = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        # One static optimization profile: min = opt = max = INPUT_SHAPE.
        __snake_case = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        __snake_case = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, """wb""") as f:
            f.write(engine.serialize())


# NOTE(review): all eight parameters share one mangled name (duplicate
# argument names are invalid Python). From the call site this is
# model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0,
# d_output1, stream); it returns (outputs, inference_seconds).
def _A(
    SCREAMING_SNAKE_CASE__: Optional[Any],
    SCREAMING_SNAKE_CASE__: List[Any],
    SCREAMING_SNAKE_CASE__: int,
    SCREAMING_SNAKE_CASE__: Tuple,
    SCREAMING_SNAKE_CASE__: int,
    SCREAMING_SNAKE_CASE__: Any,
    SCREAMING_SNAKE_CASE__: Tuple,
    SCREAMING_SNAKE_CASE__: str,
):
    # Flatten the batch's three tensors to contiguous int arrays for TRT.
    UpperCamelCase: Tuple = np.asarray(inputs['''input_ids'''], dtype=np.intaa)
    UpperCamelCase: Optional[int] = np.asarray(inputs['''attention_mask'''], dtype=np.intaa)
    UpperCamelCase: Optional[Any] = np.asarray(inputs['''token_type_ids'''], dtype=np.intaa)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), SCREAMING_SNAKE_CASE__)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), SCREAMING_SNAKE_CASE__)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), SCREAMING_SNAKE_CASE__)
    # start time
    UpperCamelCase: Optional[Any] = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(SCREAMING_SNAKE_CASE__) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE__), int(SCREAMING_SNAKE_CASE__)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
    cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    UpperCamelCase: int = time.time()
    UpperCamelCase: str = end_time - start_time
    UpperCamelCase: str = (h_outputa, h_outputa)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
    datefmt="""%m/%d/%Y %H:%M:%S""",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    __snake_case = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__snake_case = raw_datasets["""validation"""].column_names

__snake_case = """question""" if """question""" in column_names else column_names[0]
__snake_case = """context""" if """context""" in column_names else column_names[1]
__snake_case = """answers""" if """answers""" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
__snake_case = tokenizer.padding_side == """right"""

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )

__snake_case = min(args.max_seq_length, tokenizer.model_max_length)


# prepare_validation_features: turn raw SQuAD examples into tokenized,
# possibly-overlapping features with offset mappings for answer extraction.
def _A(SCREAMING_SNAKE_CASE__: Union[str, Any]):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    UpperCamelCase: Tuple = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    UpperCamelCase: List[str] = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='''only_second''' if pad_on_right else '''only_first''',
        max_length=SCREAMING_SNAKE_CASE__,
        stride=args.doc_stride,
        return_overflowing_tokens=SCREAMING_SNAKE_CASE__,
        return_offsets_mapping=SCREAMING_SNAKE_CASE__,
        padding='''max_length''',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    UpperCamelCase: str = tokenized_examples.pop('''overflow_to_sample_mapping''')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    UpperCamelCase: Any = []

    for i in range(len(tokenized_examples['''input_ids'''])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        UpperCamelCase: List[Any] = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE__)
        UpperCamelCase: List[Any] = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        UpperCamelCase: List[Any] = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['''id'''][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        UpperCamelCase: Optional[Any] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i])
        ]

    return tokenized_examples


__snake_case = raw_datasets["""validation"""]
# Validation Feature Creation
__snake_case = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="""Running tokenizer on validation dataset""",
)

__snake_case = default_data_collator
__snake_case = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
__snake_case = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


# post_processing_function: map raw start/end logits back to answer strings
# in the original contexts and shape them for the SQuAD metric.
def _A(SCREAMING_SNAKE_CASE__: Tuple, SCREAMING_SNAKE_CASE__: Optional[Any], SCREAMING_SNAKE_CASE__: int, SCREAMING_SNAKE_CASE__: Union[str, Any]="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    UpperCamelCase: Optional[int] = postprocess_qa_predictions(
        examples=SCREAMING_SNAKE_CASE__,
        features=SCREAMING_SNAKE_CASE__,
        predictions=SCREAMING_SNAKE_CASE__,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=SCREAMING_SNAKE_CASE__,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        UpperCamelCase: List[str] = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        UpperCamelCase: int = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]

    UpperCamelCase: List[Any] = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__, label_ids=SCREAMING_SNAKE_CASE__)


__snake_case = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")

# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    # binding_nbytes: byte size of one engine binding (volume * element size).
    def _A(SCREAMING_SNAKE_CASE__: Any):
        return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE__)) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE__).itemsize

    # Allocate device memory for inputs and outputs.
    __snake_case = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    __snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    __snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    __snake_case = cuda.mem_alloc(h_outputa.nbytes)
    __snake_case = cuda.mem_alloc(h_outputa.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    __snake_case = cuda.Stream()

    # Evaluation
    logger.info("""***** Running Evaluation *****""")
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')

    __snake_case = 0.0
    __snake_case = 0
    __snake_case = timeit.default_timer()

    __snake_case = None
    for step, batch in enumerate(eval_dataloader):
        # Run one batch through the TRT engine and accumulate timing stats.
        __snake_case , __snake_case = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1

        __snake_case , __snake_case = outputs
        __snake_case = torch.tensor(start_logits)
        __snake_case = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        __snake_case = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        __snake_case = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)

        __snake_case = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        __snake_case = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)

    if all_preds is not None:
        # Trim the gathered predictions back to the true dataset length.
        __snake_case = nested_truncate(all_preds, len(eval_dataset))

    __snake_case = timeit.default_timer() - start_time
    logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 10_00 / niter))
    logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 10_00))
    logger.info("""Total Number of Inference = %d""", niter)

    # Convert logits to answer strings and score them with the SQuAD metric.
    __snake_case = post_processing_function(eval_examples, eval_dataset, all_preds)
    __snake_case = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
658
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCAmelCase_ : """simple docstring""" def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: return None class UpperCAmelCase_ : """simple docstring""" def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: return None class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any =[ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def UpperCAmelCase ( self ) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ ) @require_torch @slow def UpperCAmelCase ( self ) -> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ ) @require_torch @slow def UpperCAmelCase ( self ) -> int: from transformers import BertModel UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode='''w+t''' ) as vocab_file: vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) vocab_file.flush() UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) ) 
model.save_pretrained(SCREAMING_SNAKE_CASE_ ) self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ ) @require_tf @slow def UpperCAmelCase ( self ) -> str: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) @require_torch @slow def UpperCAmelCase ( self ) -> Optional[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: try: # Compute path with TemporaryDirectory() as tempdir: UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return path except Exception as e: self.fail(SCREAMING_SNAKE_CASE_ ) @require_torch @require_tokenizers @slow def UpperCAmelCase ( self ) -> List[str]: from transformers import BertModel UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) 
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' ) @require_tf @require_tokenizers @slow def UpperCAmelCase ( self ) -> List[Any]: from transformers import TFBertModel UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Assert all variables are present self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ ) self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} ) self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} ) def UpperCAmelCase ( self ) -> int: UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 
0], '''token_type_ids''': [1, 1, 1, 1]} UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['''input_ids'''] ) self.assertEqual(ordered_input_names[0] , '''input_ids''' ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' ) self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
658
1
import re def _A ( SCREAMING_SNAKE_CASE__ : str ): UpperCamelCase :Dict = re.compile( R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' ) return bool(re.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) if __name__ == "__main__": __snake_case = """0094702343221""" print(is_sri_lankan_phone_number(phone))
658
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase_ ( lowercase ): """simple docstring""" def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Union[str, Any] = tempfile.mkdtemp() UpperCamelCase :List[str] = 5 # Realm tok UpperCamelCase :List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def UpperCAmelCase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records ) return config def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :Tuple = Dataset.from_dict( { '''id''': ['''0''', '''1'''], 
'''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def UpperCAmelCase ( self ) -> str: UpperCamelCase :Optional[Any] = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=SCREAMING_SNAKE_CASE_ , ) return block_records def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :Optional[int] = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase :Optional[Any] = self.get_config() UpperCamelCase :str = self.get_dummy_retriever() UpperCamelCase :int = retriever.tokenizer UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' ) UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids UpperCamelCase :Tuple = tokenizer( ['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids UpperCamelCase :Optional[Any] = config.reader_seq_len UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', 
'''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def UpperCAmelCase ( self ) -> int: UpperCamelCase :Union[str, Any] = self.get_config() UpperCamelCase :Union[str, Any] = self.get_dummy_retriever() UpperCamelCase :Dict = retriever.tokenizer UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' ) UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids UpperCamelCase :Optional[Any] = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids UpperCamelCase :Any = config.reader_seq_len UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :str = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: UpperCamelCase :Tuple = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , 
_REALM_BLOCK_RECORDS_FILENAME ) UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
658
1
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __snake_case = logging.getLogger(__name__) class UpperCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_=-1 ) -> Optional[Any]: # in NER datasets, the last column is usually reserved for NER label UpperCamelCase :List[str] = label_idx def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[InputExample]: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Optional[int] = mode.value UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{mode}.txt''' ) UpperCamelCase :Dict = 1 UpperCamelCase :Optional[Any] = [] with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as f: UpperCamelCase :int = [] UpperCamelCase :Optional[int] = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) ) guid_index += 1 UpperCamelCase :Dict = [] UpperCamelCase :Union[str, Any] = [] else: UpperCamelCase :Tuple = line.split(''' ''' ) words.append(splits[0] ) if len(SCREAMING_SNAKE_CASE_ ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) ) return examples def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase :Dict = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(SCREAMING_SNAKE_CASE_ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: UpperCamelCase 
:Optional[int] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(SCREAMING_SNAKE_CASE_ ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: UpperCamelCase :Any = f.read().splitlines() if "O" not in labels: UpperCamelCase :str = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self ) -> Tuple: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: UpperCamelCase :Union[str, Any] = f.read().splitlines() if "O" not in labels: UpperCamelCase :Any = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCAmelCase_ ( lowercase ): """simple docstring""" def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[InputExample]: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :List[str] = mode.value UpperCamelCase :int = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{mode}.txt''' ) UpperCamelCase :Union[str, Any] = 1 UpperCamelCase :Tuple = [] with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as f: for sentence in parse_incr(SCREAMING_SNAKE_CASE_ ): UpperCamelCase :List[Any] = [] UpperCamelCase :Tuple = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) if words: 
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) ) guid_index += 1 return examples def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: UpperCamelCase :str = 0 for sentence in parse_incr(SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Optional[int] = preds_list[example_id] UpperCamelCase :Dict = '''''' for token in sentence: out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ''' out += "\n" writer.write(SCREAMING_SNAKE_CASE_ ) example_id += 1 def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
658
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]: UpperCamelCase :int = parent UpperCamelCase :List[Any] = batch_size UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[int] = max_length UpperCamelCase :Union[str, Any] = num_mel_bins UpperCamelCase :Optional[int] = is_training UpperCamelCase :Dict = use_labels UpperCamelCase :Dict = hidden_size UpperCamelCase :Optional[int] = num_hidden_layers UpperCamelCase :str = num_attention_heads UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[str] = hidden_act UpperCamelCase :List[str] = 
hidden_dropout_prob UpperCamelCase :List[Any] = attention_probs_dropout_prob UpperCamelCase :str = type_sequence_label_size UpperCamelCase :List[Any] = initializer_range UpperCamelCase :Union[str, Any] = scope UpperCamelCase :List[Any] = frequency_stride UpperCamelCase :Tuple = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension UpperCamelCase :Optional[int] = num_patches + 2 def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCamelCase :Tuple = None if self.use_labels: UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :str = self.get_config() return config, input_values, labels def UpperCAmelCase ( self ) -> List[Any]: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = config_and_inputs UpperCamelCase :List[Any] = {'''input_values''': input_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCamelCase_ : Any =( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : Dict =False def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = ASTModelTester(self ) UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def UpperCAmelCase ( self ) -> str: pass def UpperCAmelCase ( self ) -> int: UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def UpperCAmelCase ( self ) -> Tuple: 
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase :Any = [*signature.parameters.keys()] UpperCamelCase :Optional[int] = ['''input_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Optional[int]: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _A ( ): UpperCamelCase :Any = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' ) UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Tuple: return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = self.default_feature_extractor UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.default_feature_extractor UpperCamelCase , UpperCamelCase :Dict = prepare_audio() UpperCamelCase :Dict = audio.squeeze().numpy() UpperCamelCase :int = 
feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase :List[Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
658
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""", """BridgeTower/bridgetower-base-itm-mlm""": ( """https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json""" ), } class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Optional[int] ='bridgetower_vision_model' def __init__( self , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=288 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1e-05 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> str: super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = hidden_size UpperCamelCase :List[str] = num_hidden_layers UpperCamelCase :Dict = num_channels UpperCamelCase :Dict = patch_size UpperCamelCase :List[Any] = image_size UpperCamelCase :Optional[Any] = initializer_factor UpperCamelCase :int = layer_norm_eps UpperCamelCase :Any = stop_gradient UpperCamelCase :Any = share_layernorm UpperCamelCase :List[Any] = remove_last_layer @classmethod def UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig": UpperCamelCase , UpperCamelCase :str = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config_dict.get('''model_type''' ) == "bridgetower": UpperCamelCase :Tuple = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Optional[Any] ='bridgetower_text_model' def __init__( self , SCREAMING_SNAKE_CASE_=5_0265 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=514 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1e-05 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> List[str]: super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = vocab_size UpperCamelCase :List[str] = hidden_size UpperCamelCase :int = num_hidden_layers UpperCamelCase :Union[str, Any] = num_attention_heads UpperCamelCase :Optional[Any] = hidden_act UpperCamelCase :Any = initializer_factor UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[str] = hidden_dropout_prob UpperCamelCase :Any = attention_probs_dropout_prob UpperCamelCase :List[Any] = max_position_embeddings UpperCamelCase :str = type_vocab_size UpperCamelCase :Tuple = layer_norm_eps UpperCamelCase :str = position_embedding_type UpperCamelCase :Tuple = use_cache UpperCamelCase :Dict = pad_token_id UpperCamelCase :Optional[Any] = bos_token_id UpperCamelCase :Dict = eos_token_id @classmethod def UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig": UpperCamelCase , UpperCamelCase :Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config_dict.get('''model_type''' ) == "bridgetower": UpperCamelCase :Optional[Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , 
'''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : List[str] ='bridgetower' def __init__( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1e-05 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="add" , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]: # TODO: remove this once the Hub files are updated. UpperCamelCase :List[str] = kwargs.pop('''text_config_dict''' , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = kwargs.pop('''vision_config_dict''' , SCREAMING_SNAKE_CASE_ ) super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = share_cross_modal_transformer_layers UpperCamelCase :Tuple = hidden_act UpperCamelCase :str = hidden_size UpperCamelCase :Optional[int] = initializer_factor UpperCamelCase :List[str] = layer_norm_eps UpperCamelCase :Optional[Any] = share_link_tower_layers UpperCamelCase :str = link_tower_type UpperCamelCase :Dict = num_attention_heads UpperCamelCase :Optional[Any] = num_hidden_layers UpperCamelCase :str = tie_word_embeddings UpperCamelCase :int = init_layernorm_from_vision_encoder if text_config is None: UpperCamelCase :str = {} logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' ) if vision_config is None: UpperCamelCase :str = {} logger.info('''`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.''' ) UpperCamelCase :str = BridgeTowerTextConfig(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = BridgeTowerVisionConfig(**SCREAMING_SNAKE_CASE_ ) @classmethod def UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[str]: UpperCamelCase :Dict = copy.deepcopy(self.__dict__ ) UpperCamelCase :Tuple = self.text_config.to_dict() UpperCamelCase :Optional[int] = self.vision_config.to_dict() UpperCamelCase :Dict = self.__class__.model_type return output
658
"""CLIP-guided "images mixing" Stable Diffusion community pipeline.

Mixes a content image and a style image: the two prompts, the two VAE latents
and the two CLIP image embeddings are spherically interpolated (slerp), and
the denoising loop is additionally steered by a CLIP image-similarity
gradient (``cond_fn``).

NOTE(review): identifiers in this module appear machine-mangled.  Every local
is assigned to the single name ``UpperCamelCase`` while later statements read
the original variable names (``image``, ``va``, ``timesteps``, ...), all
module helpers are named ``_A`` while call sites use the original names
(``preprocess``, ``slerp``, ``set_requires_grad``, ...), and several
signatures repeat the parameter name ``SCREAMING_SNAKE_CASE__`` /
``SCREAMING_SNAKE_CASE_`` — duplicate parameter names are a SyntaxError in
Python.  As written this file cannot even be compiled; the original names
must be restored before use.  The comments below document the *intended*
behavior, inferred from the surviving variable reads — TODO confirm against
the upstream diffusers community pipeline.
"""
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    """(originally ``preprocess``) Convert a PIL image, list of PIL images or
    tensor into an NCHW float tensor scaled to [-1, 1], resized to (w, h)
    with Lanczos resampling.  Tensors are passed through unchanged.
    """
    if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
        return image
    elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
        UpperCamelCase :Dict = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        # resize -> stack -> [0, 255] -> [0, 1] -> NCHW -> [-1, 1]
        UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
        UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0
        UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 )
        UpperCamelCase :Tuple = 2.0 * image - 1.0
        UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
    elif isinstance(image[0] , torch.Tensor ):
        UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
    return image


def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ):
    """(originally ``slerp``) Spherical linear interpolation between two
    vectors by factor ``t``.  Falls back to plain linear interpolation when
    the vectors are nearly colinear (|dot| above the threshold, default
    0.9995).  Accepts numpy arrays or torch tensors; torch inputs are moved
    to CPU/numpy and the result is moved back to the original device.
    """
    if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
        UpperCamelCase :int = True
        UpperCamelCase :Dict = va.device
        UpperCamelCase :List[Any] = va.cpu().numpy()
        UpperCamelCase :str = va.cpu().numpy()
    # cosine of the angle between the two (normalized) vectors
    UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
    if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
        # nearly colinear: lerp is numerically safer than slerp
        UpperCamelCase :Any = (1 - t) * va + t * va
    else:
        UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ )
        UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ )
        UpperCamelCase :Union[str, Any] = theta_a * t
        UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ )
        UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
        UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a
        UpperCamelCase :Union[str, Any] = sa * va + sa * va
    if inputs_are_torch:
        UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
    return va


def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
    """(originally ``spherical_dist_loss``) Squared great-circle distance
    between two batches of vectors after L2-normalization.
    """
    UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
    UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )


def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
    """(originally ``set_requires_grad``) Set ``requires_grad`` on every
    parameter of ``model`` to ``value`` (freeze/unfreeze helper).
    """
    for param in model.parameters():
        UpperCamelCase :Any = value


class UpperCAmelCase_ ( lowercase ):
    """simple docstring"""

    # NOTE(review): every method below except ``__call__`` is named
    # ``UpperCAmelCase`` — each definition shadows the previous one, so only
    # the last survives on the class.  The intended names (attention-slicing
    # toggles, vae/unet freeze-unfreeze, get_timesteps, prepare_latents,
    # get_image_description, get_clip_image_embeddings, cond_fn) are inferred
    # from the bodies and from the call sites in ``__call__``.

    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str:
        """Register the SD components plus a CLIP model (for guidance) and an
        optional CoCa captioner (used to auto-generate missing prompts).
        Freezes the text encoder and the CLIP model.
        """
        super().__init__()
        self.register_modules(
            vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , )
        # newer feature extractors expose size as a dict with "shortest_edge"
        UpperCamelCase :Union[str, Any] = (
            feature_extractor.size
            if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ )
            else feature_extractor.size['''shortest_edge''']
        )
        UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ )
        set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple:
        """Enable sliced attention; "auto" uses half the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> int:
        """Disable sliced attention (delegates with a sentinel slice size)."""
        self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> str:
        """Freeze the VAE."""
        set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        """Unfreeze the VAE."""
        set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> int:
        """Freeze the UNet."""
        set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> str:
        """Unfreeze the UNet."""
        set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
        """(originally ``get_timesteps``) Truncate the scheduler's timestep
        list for img2img: keep only the last ``strength`` fraction of steps.
        Returns (timesteps, effective number of inference steps).
        """
        # get the original timestep using init_timestep
        UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 )
        UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
        """(originally ``prepare_latents``) Encode ``image`` through the VAE,
        scale by 0.18215, duplicate per prompt, and add scheduler noise at
        ``timestep``.  Raises ValueError for non-tensor input.
        """
        if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
        UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            # one generator per batch element for reproducibility
            UpperCamelCase :int = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
            ]
            UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
        else:
            UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase :List[str] = 0.1_8215 * init_latents
        UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
        UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
        # get latents
        UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = init_latents
        return latents

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
        """(originally ``get_image_description``) Caption an image with the
        CoCa model and strip the special start/end-of-text markers.
        """
        UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
        """(originally ``get_clip_image_embeddings``) Run the CLIP feature
        extractor + image encoder, L2-normalize the embedding and duplicate
        it per prompt.  NOTE(review): ``.half()`` hard-codes fp16 here —
        presumably assumes a CUDA device; verify before running on CPU.
        """
        UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
        UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
        return image_embeddings_clip

    @torch.enable_grad()
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
        """(originally ``cond_fn``) One CLIP-guidance step: predict x_0 from
        the current latents, decode it, embed it with CLIP, take the gradient
        of the spherical distance to the target CLIP embedding w.r.t. the
        latents, and nudge the noise prediction (or, for LMS-style
        schedulers, the latents) in that direction.
        """
        UpperCamelCase :List[str] = latents.detach().requires_grad_()
        UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # predict the noise residual
        UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep]
            UpperCamelCase :Optional[int] = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ )
            # blend the x_0 estimate with the current latents
            UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase :str = self.scheduler.sigmas[index]
            UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase :int = 1 / 0.1_8215 * sample
        UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
        UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype )
        UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale
        # descend the loss w.r.t. the latents
        UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
        if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase :Dict = latents.detach() + grads * (sigma**2)
            UpperCamelCase :Optional[Any] = noise_pred_original
        else:
            UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict:
        """Run the full images-mixing pipeline: caption missing prompts with
        CoCa, slerp the two text embeddings and the two noised image latents,
        then denoise with optional classifier-free guidance and optional CLIP
        guidance; return a StableDiffusionPipelineOutput (or a tuple).
        """
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
            raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1:
            # spread a single generator over the batch (rest unseeded)
            UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1)
        # which CoCa components are missing (needed only when a prompt is None)
        UpperCamelCase :Tuple = [
            ('''model''', self.coca_model is None),
            ('''tokenizer''', self.coca_tokenizer is None),
            ('''transform''', self.coca_transform is None),
        ]
        UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
        UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(SCREAMING_SNAKE_CASE_ ):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ )
        if style_prompt is None:
            if len(SCREAMING_SNAKE_CASE_ ):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ )
        # get prompt text embeddings for content and style
        UpperCamelCase :List[Any] = self.tokenizer(
            SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
        UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        UpperCamelCase :List[Any] = self.tokenizer(
            SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
        UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        # mix the two prompt embeddings
        UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # duplicate text embeddings for each generation per prompt
        UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
        # set timesteps
        UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        UpperCamelCase :List[str] = {}
        if accepts_offset:
            UpperCamelCase :Tuple = 1
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        # NOTE(review): the moved tensor is discarded here (result not
        # re-assigned) — looks like a pre-existing upstream quirk; confirm.
        self.scheduler.timesteps.to(self.device )
        UpperCamelCase , UpperCamelCase :Tuple = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
        UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
        # Preprocess image
        UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = self.prepare_latents(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = self.prepare_latents(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
        # mix the two noised image latents
        UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        if clip_guidance_scale > 0:
            UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            UpperCamelCase :Optional[Any] = slerp(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCamelCase :Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCamelCase :Any = content_text_input.input_ids.shape[-1]
            UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
            UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        UpperCamelCase :int = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(
                    self.device )
            else:
                UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            UpperCamelCase :str = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase :Dict = {}
        if accepts_eta:
            UpperCamelCase :int = eta
        # check if the scheduler accepts generator
        UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            UpperCamelCase :List[str] = generator
        with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ):
            for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
                # expand the latents if we are doing classifier free guidance
                UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                # predict the noise residual
                UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 )
                    UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    UpperCamelCase :int = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    UpperCamelCase , UpperCamelCase :str = self.cond_fn(
                        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
                # compute the previous noisy sample x_t -> x_t-1
                UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        UpperCamelCase :List[Any] = 1 / 0.1_8215 * latents
        UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
        UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
658
1
"""Base85 (Ascii85) helpers built on the standard-library ``base64`` module."""
import base64


def base85_encode(string: str) -> bytes:
    """Encode ``string`` (as UTF-8 bytes) to Ascii85.

    >>> base85_encode("")
    b''
    >>> base85_encode("12345")
    b'0etOA2#'
    """
    # Fix: the previous body referenced the undefined name ``string`` (its
    # parameter had been renamed away) and imported the nonexistent module
    # ``baseaa`` (a corruption of ``base64``/``a85encode``) — every call
    # raised at runtime.
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string.

    >>> base85_decode(b"0etOA2#")
    '12345'
    """
    return base64.a85decode(a85encoded).decode("utf-8")


# Both helpers were previously defined under the same mangled name ``_A``
# (the second definition shadowed the first, making the encoder unreachable).
# Keep ``_A`` bound to the last definition for backward compatibility.
_A = base85_decode

if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of ``nums`` (elements kept in input order) whose
    elements sum to exactly ``max_sum``.

    Fix: both functions in this module were defined under the mangled name
    ``_A`` while the call sites referenced ``create_state_space_tree`` and
    ``generate_sum_of_subsets_soln`` — undefined names that raised
    ``NameError``.  The original names are restored; the logic is unchanged.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first backtracking over the subset state-space tree.

    ``path`` is the subset built so far, ``num_index`` the next candidate
    position, ``remaining_nums_sum`` the sum of the not-yet-considered
    elements (used for pruning).  Matching subsets are appended to ``result``.
    """
    # Prune: the path already overshoots, or even taking everything that is
    # left could not reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],  # fresh list: no shared-path aliasing
            result,
            remaining_nums_sum - nums[index],
        )


# Backwards-compatible aliases for the previous (mangled) module names.
_A = create_state_space_tree

nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
__snake_case = result
print(*result)
658
1
"""``datasets`` Metric wrapper around the TensorFlow NMT BLEU implementation."""
import datasets

# From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
from .nmt_bleu import compute_bleu

# NOTE(review): identifiers below look machine-mangled — all three module
# constants are assigned to the same name ``__snake_case`` (each assignment
# clobbers the previous one) while the decorator and the class body read
# ``_DESCRIPTION``, ``_CITATION`` and ``_KWARGS_DESCRIPTION``, which are never
# defined here.  As written this module raises NameError at import time;
# restoring the original constant names is required to run it.

# BibTeX citations for the BLEU paper and the ORANGE smoothing method.
__snake_case = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """

# Human-readable description of the BLEU metric (shown by ``datasets``).
__snake_case = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """

# Usage/arguments documentation surfaced through ``Metric.inputs_description``.
__snake_case = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... ] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """simple docstring"""

    # NOTE(review): both methods below share the mangled name ``UpperCAmelCase``
    # (presumably the original ``_info`` and ``_compute`` of datasets.Metric —
    # TODO confirm); the second definition shadows the first on the class.

    def UpperCAmelCase ( self ) -> List[str]:
        """Declare the metric's schema: tokenized predictions and a list of
        tokenized references per prediction.
        """
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
                }
            ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ] , )

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=False ) -> str:
        """Delegate to ``compute_bleu`` and unpack its 6-tuple into a dict.

        NOTE(review): the parenthesized tuple target with an annotation
        (``(...) :Optional[Any] = score``) is a SyntaxError in Python —
        another artifact of the mangling; the original unpacked into six
        distinct names.
        """
        UpperCamelCase :Any = compute_bleu(
            reference_corpus=SCREAMING_SNAKE_CASE_ , translation_corpus=SCREAMING_SNAKE_CASE_ , max_order=SCREAMING_SNAKE_CASE_ , smooth=SCREAMING_SNAKE_CASE_ )
        ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :Optional[Any] = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
658
"""Project Euler 112: least number at which the proportion of bouncy numbers
first reaches a given percentage.  https://projecteuler.net/problem=112
"""


def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy, i.e. its digits are neither entirely
    non-decreasing nor entirely non-increasing.

    >>> check_bouncy(101)
    True
    >>> check_bouncy(122)
    False

    :raises ValueError: if ``n`` is not an integer
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    # Fix: the previous body sorted the raw int (TypeError) instead of its
    # digit string, and both functions here were defined under the single
    # mangled name ``_A`` while the call sites used ``check_bouncy`` /
    # ``solution`` — undefined names.
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    # Not sorted ascending and not sorted descending => bouncy.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least ``num`` for which the proportion of bouncy numbers in
    1..num first reaches ``percent`` percent.

    >>> solution(50)
    538

    :raises ValueError: if ``percent`` is not strictly between 0 and 100
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


# Backwards-compatible alias for the previous (mangled) public name.
_A = solution

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
658
1
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the two answers are equal after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions that exactly match at least one reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the (keep, delete, add) n-gram scores of SARI for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        # Replicate source counts so they are comparable with the pooled references.
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """Compute the sentence-level SARI score, averaging 1- to 4-gram scores."""
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Tokenize (and optionally lowercase) a sentence for SARI computation."""
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100), averaged over sentence-level scores."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; every prediction must have the same number of references."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """Combined SARI / sacreBLEU / exact-match metric for text simplification."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        # Each sub-metric is computed independently and merged into one dict.
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
658
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to an integer whose decimal digits spell
    the binary representation, e.g. "AC" -> 10101100 and "-fF" -> -11111111.

    Raises:
        ValueError: if the input is empty or not a valid hexadecimal number.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    if int_num == 0:
        # Edge case: the loop below never runs for 0, and int("") would raise.
        return 0
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
1
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left:right]; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; return an index of target or -1.

    Ranges narrower than ``precision`` are handed off to ``lin_search``.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            # Small ranges are cheaper to scan linearly.
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    # Loop exhausted the search range without finding the target.
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left:right]; return an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
658
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from ``position`` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of the board has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from ``pos`` with move number ``curr``."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Backtrack: undo the move before trying the next candidate.
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns:
        A board whose entries are the 1-based move numbers of the tour.

    Raises:
        ValueError: if no open tour exists for this board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
658
1
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number`` as a newline-separated
    string with ``number_of_terms`` lines, e.g. "5 * 1 = 5\\n5 * 2 = 10"."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
658
# NOTE(review): unit-test suite for transformers.GenerationConfig covering
# save/load round-trips (with and without a custom config_name),
# GenerationConfig.from_model_config conversion, update()/kwargs handling,
# default values, and Hub push/pull under @is_staging_test.
# NOTE(review): this text is whitespace-mangled (newlines lost) and an
# obfuscation pass collapsed distinct local variable names to "UpperCamelCase"
# and argument/literal values to "SCREAMING_SNAKE_CASE_"; several of those
# identifiers are undefined inside their methods (e.g. "config",
# "generation_config_from_model", the boolean/None literals in assertEqual),
# so this block cannot run as-is. Kept byte-for-byte — restore from the
# upstream transformers test file (tests/generation/test_configuration_utils.py)
# before changing behavior; TODO confirm each sentinel value against upstream.
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('''foo.json''',)] ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase :int = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained('''gpt2''' ) UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) 
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[str] = GenerationConfig() UpperCamelCase :List[str] = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } UpperCamelCase :Dict = copy.deepcopy(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = generation_config.update(**SCREAMING_SNAKE_CASE_ ) # update_kwargs was not modified (no side effects) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} ) def UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase :List[Any] = GenerationConfig() UpperCamelCase :Tuple = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir: generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''' ) UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ ) assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Dict = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(default_config.num_beams , 1 ) UpperCamelCase :Tuple = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: 
config.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @classmethod def UpperCAmelCase ( cls ) -> Optional[Any]: UpperCamelCase :List[str] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE_ ) @classmethod def UpperCAmelCase ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token , repo_id='''test-generation-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' ) except HTTPError: pass def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Optional[Any] = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token ) UpperCamelCase :List[Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) UpperCamelCase :Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase :List[str] = 
GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token ) UpperCamelCase :Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) UpperCamelCase :Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
658
1
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is a bouncy number.

    A bouncy number is a positive integer whose digits are neither entirely
    non-decreasing nor entirely non-increasing (e.g. 155349).

    Raises:
        ValueError: if ``n`` is not an ``int``.
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    # A non-bouncy number equals either its digit-sorted form (increasing)
    # or the reverse of it (decreasing).
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    first reaches ``percent`` (Project Euler problem 112).

    Raises:
        ValueError: if ``percent`` is not strictly between 0 and 100.
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
658
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers h(n) = n * (2n - 1),
    starting at n = 0.

    Raises:
        ValueError: if ``length`` is not a positive integer.
    """
    # isinstance is checked first so that non-numeric input raises ValueError
    # (not TypeError from the comparison).
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
658
1
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# Number of labels for each GLUE fine-tuning task (sts-b is a regression task, hence 1).
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and save it.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: path to the XLNet config json file.
        pytorch_dump_folder_path: folder where the weights and config are written.
        finetuning_task: optional task name; selects which model head to build.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
658
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


# Mapping from a plain-English language name to its NLLB (FLORES-200 style) code.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}


class TranslationTool(PipelineTool):
    """Tool translating text between languages with the NLLB-200 model.

    The base ``PipelineTool`` drives the ``encode`` -> ``forward`` -> ``decode``
    pipeline and instantiates ``pre_processor_class`` / ``model_class`` from
    ``default_checkpoint``.

    Example:

    ```py
    from transformers.tools import TranslationTool

    translator = TranslationTool()
    translator("This is a super nice API!", src_lang="English", tgt_lang="French")
    ```
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Tokenize ``text``, mapping plain-English language names to NLLB codes.

        Raises:
            ValueError: if either language name is not in ``lang_to_code``.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        # Generate the translated token ids from the tokenized inputs.
        return self.model.generate(**inputs)

    def decode(self, outputs):
        # Decode the first generated sequence, dropping special tokens.
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
658
1
import random
import unittest

from torch.utils.data import BatchSampler, DataLoader, IterableDataset

from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    # Yields 0, 1, 2, ... stopping randomly (with probability `p_stop` after each
    # item) or after `max_length` items at the latest.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class UpperCAmelCase_(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        # Build one shard per (simulated) process and compare the batches each
        # shard yields against `expected`.
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Shards know their length when batches are not split mid-batch.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            # Without drop_last the shards loop back to the beginning of the dataset.
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
658
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Below this window size the search falls back to a linear scan.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Return the index of ``target`` in ``array[left:right]``, or -1 if absent.

    ``right`` is exclusive, matching ``range`` semantics.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted ``array``; returns an index or -1.

    The window [left, right] is inclusive at both ends; once it shrinks below
    ``precision`` elements, a linear scan finishes the job.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # +1 because lin_search's upper bound is exclusive.
            return lin_search(left, right + 1, array, target)
        # Split the *current* window into thirds relative to `left`
        # (not (left+right)//3, which is only valid when left == 0).
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of ``array[left..right]`` (inclusive bounds).

    Returns the index of ``target`` or -1 when the window is empty/exhausted.
    """
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        if array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
658
1
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Stub whose forward() lists the generated ONNX inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Stub whose forward() interleaves an unrelated argument between the generated inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    """Tests for transformers.convert_graph_to_onnx (export, quantization, shape inference)."""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        """Export a freshly-built BERT with a toy vocab, loaded from a local directory."""
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX into a temp dir and return the output path; fail on any exception."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(inputs_args), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
658
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time top-down rod cutting without memoization.

    Returns the maximum revenue obtainable from a rod of length ``n`` given
    ``prices`` where ``prices[i - 1]`` is the price of a piece of length ``i``.
    Raises ValueError via ``_enforce_args`` for invalid arguments.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # Best over: sell a piece of length i, cut the rest optimally.
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized top-down rod cutting; same contract as ``naive_cut_rod_recursive``."""
    _enforce_args(n, prices)
    # max_rev has length n + 1 to accommodate a rod of length 0; -inf marks "not computed".
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for ``top_down_cut_rod``; ``max_rev`` is the memo table."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative bottom-up rod cutting; same contract as ``naive_cut_rod_recursive``."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Raise ValueError if ``n`` is negative or exceeds the number of prices."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
658
1
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict keyed by path tuples."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    """Rename flat T5X parameter keys to HF Pix2Struct names and convert arrays to torch tensors.

    Weight matrices (everything except embeddings) are transposed, since Flax
    stores dense kernels as (in, out) while torch.nn.Linear expects (out, in).
    """
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a T5X Pix2Struct checkpoint to HF format and save model + processor.

    use_large selects the large-architecture hyperparameters; is_vqa marks the
    config as a VQA checkpoint.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the obfuscated original assigned 4096 / True to unnamed
        # targets here — presumably the image processor's patch budget and VQA
        # flag; confirm attribute names against the installed transformers version.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    # NOTE(review): --is_vqa is parsed but not forwarded below (matches upstream script).
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
658
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model.

    Stores the architecture hyperparameters (image/patch geometry, per-stage
    sizes and depths, focal modulation settings, regularization) and the
    backbone feature selection handled by BackboneConfigMixin.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per entry in `depths`, plus the stem.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
658
1
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper mirroring the checkpoint's layout: a backbone plus a QA head."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because LightningModule requires it; never called here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a Longformer-QA PyTorch Lightning checkpoint into a HF model directory."""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
658
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    """Builds small DPT(-hybrid) configs and inputs, and runs the shared model checks."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite instantiation for the DPT model family."""

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        # NOTE(review): the obfuscated original hid the expected exception type;
        # ValueError assumed — confirm against modeling_dpt's readout_type check.
        with self.assertRaises(ValueError):
            model = DPTForDepthEstimation(config)


def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
658
1
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : List[Any] ='gptj' UpperCamelCase_ : Tuple ={ 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , SCREAMING_SNAKE_CASE_=5_0400 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=28 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=5_0256 , SCREAMING_SNAKE_CASE_=5_0256 , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]: UpperCamelCase :Optional[int] = vocab_size UpperCamelCase :List[Any] = n_positions UpperCamelCase :Union[str, Any] = n_embd UpperCamelCase :Optional[int] = n_layer UpperCamelCase :Any = n_head UpperCamelCase :Any = n_inner UpperCamelCase :List[Any] = rotary_dim UpperCamelCase :List[str] = activation_function UpperCamelCase :Any = resid_pdrop UpperCamelCase :List[Any] = embd_pdrop UpperCamelCase :List[Any] = attn_pdrop UpperCamelCase :Optional[Any] = layer_norm_epsilon UpperCamelCase :int = initializer_range UpperCamelCase :Optional[Any] = use_cache UpperCamelCase :Tuple = bos_token_id UpperCamelCase :int = eos_token_id super().__init__( 
bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class UpperCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "default" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ) -> List[Any]: super().__init__(SCREAMING_SNAKE_CASE_ , task=SCREAMING_SNAKE_CASE_ , patching_specs=SCREAMING_SNAKE_CASE_ , use_past=SCREAMING_SNAKE_CASE_ ) if not getattr(self._config , '''pad_token_id''' , SCREAMING_SNAKE_CASE_ ): # TODO: how to do that better? UpperCamelCase :List[Any] = 0 @property def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: UpperCamelCase :Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' ) UpperCamelCase :str = {0: '''batch''', 1: '''past_sequence + sequence'''} else: UpperCamelCase :List[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def UpperCAmelCase ( self ) -> int: return self._config.n_head def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ) -> Mapping[str, Any]: UpperCamelCase :Any = super(SCREAMING_SNAKE_CASE_ , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) # We need to order the input in the way they appears in the forward() UpperCamelCase :Tuple = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: 
import torch UpperCamelCase , UpperCamelCase :List[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCamelCase :Union[str, Any] = seqlen + 2 UpperCamelCase :Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) UpperCamelCase :Optional[Any] = [ (torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers ) ] UpperCamelCase :Union[str, Any] = common_inputs['''attention_mask'''] if self.use_past: UpperCamelCase :List[Any] = ordered_inputs['''attention_mask'''].dtype UpperCamelCase :List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 ) return ordered_inputs @property def UpperCAmelCase ( self ) -> int: return 13
658
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) def _A ( SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :Union[str, Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCamelCase :Any = 128 elif "12-12" in model_name: UpperCamelCase :Union[str, Any] = 12 UpperCamelCase :Any = 12 elif "14-14" in model_name: UpperCamelCase :Optional[int] = 14 UpperCamelCase :List[str] = 14 elif "16-16" in model_name: UpperCamelCase :List[Any] = 16 UpperCamelCase :Optional[Any] = 16 else: raise ValueError('''Model not supported''' ) UpperCamelCase :Tuple = '''huggingface/label-files''' if "speech-commands" in model_name: UpperCamelCase :Optional[Any] = 35 UpperCamelCase :List[Any] = '''speech-commands-v2-id2label.json''' else: UpperCamelCase :Optional[int] = 527 UpperCamelCase :List[Any] = '''audioset-id2label.json''' UpperCamelCase :Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase :List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} UpperCamelCase :List[Any] = idalabel UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()} return config def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): if "module.v" in name: UpperCamelCase :Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: UpperCamelCase :int = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: UpperCamelCase :Tuple = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: UpperCamelCase :Optional[int] = name.replace('''pos_embed''' , 
'''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCamelCase :str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: UpperCamelCase :Any = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCamelCase :Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCamelCase :Union[str, Any] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCamelCase :str = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCamelCase :Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCamelCase :Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCamelCase :List[str] = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCamelCase :Union[str, Any] = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: UpperCamelCase :int = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: UpperCamelCase :Tuple = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ): for key in orig_state_dict.copy().keys(): UpperCamelCase :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "qkv" in key: UpperCamelCase :Any = key.split('''.''' ) UpperCamelCase :str = int(key_split[3] ) UpperCamelCase :Union[str, Any] = config.hidden_size if "weight" in key: UpperCamelCase :List[str] = val[:dim, :] UpperCamelCase :Optional[Any] = val[dim : dim * 2, :] UpperCamelCase :Optional[Any] = val[-dim:, :] else: UpperCamelCase :Dict = val[:dim] UpperCamelCase :Optional[int] = val[dim : dim * 2] UpperCamelCase :List[Any] = 
val[-dim:] else: UpperCamelCase :Union[str, Any] = val return orig_state_dict def _A ( SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :List[str] = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=False ): UpperCamelCase :Optional[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :str = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict UpperCamelCase :Optional[int] = model_name_to_url[model_name] UpperCamelCase :Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) # remove some keys remove_keys(SCREAMING_SNAKE_CASE__ ) # rename some keys UpperCamelCase :Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) # load 🤗 model UpperCamelCase :int = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCamelCase :Union[str, Any] = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78 UpperCamelCase :List[str] = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26 UpperCamelCase :Optional[Any] = 1024 if '''speech-commands''' not in model_name else 128 UpperCamelCase :int = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) if "speech-commands" in model_name: UpperCamelCase :Dict = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) UpperCamelCase :List[Any] = dataset[0]['''audio''']['''array'''] else: UpperCamelCase :List[Any] = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) UpperCamelCase , UpperCamelCase :Dict = torchaudio.load(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :List[str] = waveform.squeeze().numpy() UpperCamelCase :Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' ) # forward pass UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE__ ) UpperCamelCase :str = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCamelCase :List[str] = torch.tensor([-1.50_80, 
-7.45_34, -8.89_17] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __snake_case = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
658
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool __snake_case = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", 
"""Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern 
Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": 
"""smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Optional[Any] ='facebook/nllb-200-distilled-600M' UpperCamelCase_ : Optional[Any] =( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. 
Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) UpperCamelCase_ : Dict ='translator' UpperCamelCase_ : Any =AutoTokenizer UpperCamelCase_ : Optional[Any] =AutoModelForSeqaSeqLM UpperCamelCase_ : List[Any] =LANGUAGE_CODES UpperCamelCase_ : int =['text', 'text', 'text'] UpperCamelCase_ : Union[str, Any] =['text'] def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) UpperCamelCase :Optional[int] = self.lang_to_code[src_lang] UpperCamelCase :Union[str, Any] = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: return self.model.generate(**SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
658
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
658
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __snake_case = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __snake_case = {"""facebook/blenderbot_small-90M""": 5_12} def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): UpperCamelCase :List[Any] = set() UpperCamelCase :Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase :Tuple = char UpperCamelCase :Tuple = set(SCREAMING_SNAKE_CASE__ ) return pairs class UpperCAmelCase_ ( lowercase ): """simple docstring""" UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="__start__" , SCREAMING_SNAKE_CASE_="__end__" , SCREAMING_SNAKE_CASE_="__unk__" , SCREAMING_SNAKE_CASE_="__null__" , **SCREAMING_SNAKE_CASE_ , ) -> Tuple: super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: UpperCamelCase :Union[str, Any] = json.load(SCREAMING_SNAKE_CASE_ ) 
UpperCamelCase :List[Any] = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: UpperCamelCase :Optional[int] = merges_handle.read().split('''\n''' )[1:-1] UpperCamelCase :Optional[Any] = [tuple(merge.split() ) for merge in merges] UpperCamelCase :Dict = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase :List[str] = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str: if token in self.cache: return self.cache[token] UpperCamelCase :Optional[Any] = re.sub('''([.,!?()])''' , r''' \1''' , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = re.sub('''(\')''' , r''' \1 ''' , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = re.sub(r'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE_ ) if "\n" in token: UpperCamelCase :int = token.replace('''\n''' , ''' __newln__''' ) UpperCamelCase :Union[str, Any] = token.split(''' ''' ) UpperCamelCase :str = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE_ ): continue UpperCamelCase :Union[str, Any] = token.lower() UpperCamelCase :Union[str, Any] = tuple(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) UpperCamelCase :int = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: words.append(SCREAMING_SNAKE_CASE_ ) continue while True: UpperCamelCase :str = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase :Tuple = bigram UpperCamelCase :List[str] = [] UpperCamelCase :int = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: UpperCamelCase :str = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) new_word.extend(word[i:j] ) UpperCamelCase :Any = j except 
ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase :Any = tuple(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: UpperCamelCase :Optional[int] = get_pairs(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = '''@@ '''.join(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[Any] = word[:-4] UpperCamelCase :Union[str, Any] = word words.append(SCREAMING_SNAKE_CASE_ ) return " ".join(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase :Optional[int] = [] UpperCamelCase :Optional[Any] = re.findall(r'''\S+\n?''' , SCREAMING_SNAKE_CASE_ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) ) return split_tokens def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int: UpperCamelCase :Union[str, Any] = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str: UpperCamelCase :Any = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip() return out_string def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase :Tuple = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase :List[str] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with 
open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) UpperCamelCase :Optional[int] = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) UpperCamelCase :Optional[int] = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file
658
# Accuracy metric for the MATH dataset (Hendrycks et al., 2021).
# Fix: the three module constants were all bound to one name (so the decorator's
# _DESCRIPTION/_KWARGS_DESCRIPTION were undefined) and both methods shared one
# name, so the `_info` hook required by `datasets.Metric` never existed.
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks
  and Collin Burns
  and Saurav Kadavath
  and Akul Arora
  and Steven Basart
  and Eric Tang
  and Dawn Song
  and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Accuracy metric for the MATH dataset (canonicalizes LaTeX before comparing)."""

    def _info(self):
        # Declares the metric's feature schema; required hook of datasets.Metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return {"accuracy": fraction of predictions equivalent to their reference}."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            # math_equivalence canonicalizes both strings before comparing.
            n_correct += 1.0 if math_equivalence.is_equiv(j, i) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
658
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
658
# Smoke tests for the Flax example scripts.
# Fix: the obfuscated version bound SRC_DIRS/logger/get_setup_file/get_results
# to mangled names while the call sites kept the real names (NameError), used
# an undefined base class `lowercase` instead of TestCasePlus, and gave every
# test method the same name so only the last one survived class creation.
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


# Make the example scripts importable as plain modules.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax  # NOTE(review): module name as in source — verify it matches the script on disk

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the -f command-line flag (pytest passthrough helper)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    """Load `{split}_results.json` written by an example script into a dict."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
658
1
import math def _A ( SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False UpperCamelCase :List[str] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any=1 , **SCREAMING_SNAKE_CASE__ : Dict ): UpperCamelCase :int = factor * value UpperCamelCase :Tuple = value while not is_prime(SCREAMING_SNAKE_CASE__ ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ ) return value
658
from __future__ import annotations from collections.abc import Callable def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ): UpperCamelCase :Optional[Any] = x_start UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Optional[int] = 0.0 for _ in range(SCREAMING_SNAKE_CASE__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCamelCase :Any = (x_end - x_start) / steps + xa UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCamelCase :Optional[int] = xa UpperCamelCase :List[str] = fxa return area if __name__ == "__main__": def _A ( SCREAMING_SNAKE_CASE__ : int ): return x**3 + x**2 print("""f(x) = x^3 + x^2""") print("""The area between the curve, x = -5, x = 5 and the x axis is:""") __snake_case = 10 while i <= 10_00_00: print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
658
1
from __future__ import annotations def _A ( SCREAMING_SNAKE_CASE__ : list[int] ): UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE__ ) // 2 # choose the middle 3 elements UpperCamelCase :Optional[Any] = lst[m - 1 : m + 2] # if middle element is peak if three[1] > three[0] and three[1] > three[2]: return three[1] # if increasing, recurse on right elif three[0] < three[2]: if len(lst[:m] ) == 2: m -= 1 return peak(lst[m:] ) # decreasing else: if len(lst[:m] ) == 2: m += 1 return peak(lst[:m] ) if __name__ == "__main__": import doctest doctest.testmod()
658
# Tests for the consistency-model stochastic iterative scheduler.
# Fix: the class attributes and every method had been obfuscated to the same
# name (`UpperCAmelCase`), so `scheduler_classes`/`num_inference_steps` and
# `get_scheduler_config` were missing and duplicate method names shadowed all
# tests but the last. Real names restored; assertRaises exception restored to
# ValueError (the obfuscated placeholder).
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable per-test via kwargs."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
658
1
# Self-training driver for text classification.
# Fix: the three argument dataclasses were all named `UpperCAmelCase_` (each
# overriding the previous) with all fields named `UpperCamelCase_`, while the
# driver constructs STModelArguments/STDataArguments/STTrainingArguments and
# reads `args.<field>`; both module functions were named `_A` while the loop
# calls `create_pseudo_labeled_data`. Real names restored throughout.
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments about which pretrained model to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments about the train / inference / validation data files."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments controlling the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Build the pseudo-labeled training file for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the full self-training loop: fine-tune, pseudo-label, repeat."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
658
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> public names it exports; consumed lazily by
# `_LazyModule` below so heavy deps (torch/TF) are only imported on demand.
# NOTE: each list must live under its own key. The previous revision assigned
# every list to the same module-level variable, clobbering the mapping and
# leaving `_import_structure` undefined at the `_LazyModule` call (NameError).
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: simply do not advertise the PyTorch models
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # TensorFlow missing: skip the TF models
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model/backbone.

    Defaults follow ``microsoft/focalnet-tiny``. The previous revision declared
    every ``__init__`` parameter with the same placeholder name — a SyntaxError
    (duplicate argument names) — and inherited from undefined names; real
    parameter names are restored from the attribute assignments in the body.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],  # list defaults kept to match upstream config serialization; read-only here
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # "stem" followed by one entry per stage ("stage1", ..., "stageN").
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Reconcile out_features/out_indices against the stage names for backbone use.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
658
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Dummy module whose forward takes exactly the generated (contiguous) inputs."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy module with an extra parameter interleaved between generated inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    """Tests for the graph-to-ONNX conversion helpers.

    NOTE(review): the previous revision named every class with the same
    placeholder and referenced `OnnxExportTestCase` / `FuncContiguousArgs` /
    `FuncNonContiguousArgs`, which no longer existed (NameError) — restored.
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX, returning the output path; fails the test on any error."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
658
1
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal Keras model embedding a TF tokenizer; used to test save/load round-trips.

        NOTE(review): the previous revision renamed this class to a placeholder
        while `test_saved_model` still referenced `ModelToSave` — restored.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    # NOTE(review): previous revision compared via `tf.intaa`,
                    # which is not a TensorFlow dtype; tf.int64 is intended.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
658
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    """Exercises RealmRetriever against a tiny in-memory vocabulary and block records."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # dtype=object keeps variable-length byte strings intact (the previous
        # revision passed an undefined placeholder name as the dtype).
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        # NOTE(review): add_special_tokens/return_* flags were mangled to an
        # undefined placeholder; False matches the raw-answer-span usage here.
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path: redirect the hub download to the local file.
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
658
1
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    """Tests BarkProcessor text encoding and speaker-embedding handling."""

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): these boolean kwargs were mangled to an undefined
        # placeholder name (NameError at runtime); values restored to match
        # the processor's own tokenization so both encodings compare equal.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
658
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]: UpperCamelCase :int = parent UpperCamelCase :List[Any] = batch_size UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[int] = max_length UpperCamelCase :Union[str, Any] = num_mel_bins UpperCamelCase :Optional[int] = is_training UpperCamelCase :Dict = use_labels UpperCamelCase :Dict = hidden_size UpperCamelCase :Optional[int] = num_hidden_layers UpperCamelCase :str = num_attention_heads UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[str] = hidden_act UpperCamelCase :List[str] = 
hidden_dropout_prob UpperCamelCase :List[Any] = attention_probs_dropout_prob UpperCamelCase :str = type_sequence_label_size UpperCamelCase :List[Any] = initializer_range UpperCamelCase :Union[str, Any] = scope UpperCamelCase :List[Any] = frequency_stride UpperCamelCase :Tuple = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension UpperCamelCase :Optional[int] = num_patches + 2 def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCamelCase :Tuple = None if self.use_labels: UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :str = self.get_config() return config, input_values, labels def UpperCAmelCase ( self ) -> List[Any]: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = config_and_inputs UpperCamelCase :List[Any] = {'''input_values''': input_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCamelCase_ : Any =( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : Dict =False def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = ASTModelTester(self ) UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def UpperCAmelCase ( self ) -> str: pass def UpperCAmelCase ( self ) -> int: UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def UpperCAmelCase ( self ) -> Tuple: 
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase :Any = [*signature.parameters.keys()] UpperCamelCase :Optional[int] = ['''input_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Optional[int]: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _A ( ): UpperCamelCase :Any = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' ) UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Tuple: return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = self.default_feature_extractor UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.default_feature_extractor UpperCamelCase , UpperCamelCase :Dict = prepare_audio() UpperCamelCase :Dict = audio.squeeze().numpy() UpperCamelCase :int = 
feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase :List[Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
658
1
import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC __snake_case = parse(importlib.metadata.version("""torch""")) def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Version] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' ) UpperCamelCase :List[Any] = STR_OPERATION_TO_FUNC[operation] if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCamelCase :Tuple = parse(importlib.metadata.version(SCREAMING_SNAKE_CASE__ ) ) return operation(SCREAMING_SNAKE_CASE__ , parse(SCREAMING_SNAKE_CASE__ ) ) def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): return compare_versions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
658
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ): return image elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ): UpperCamelCase :Dict = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 ) UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0 UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 ) UpperCamelCase :Tuple = 2.0 * image - 1.0 UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) elif isinstance(image[0] , torch.Tensor ): UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) return image def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ): if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ): UpperCamelCase :int = True UpperCamelCase :Dict = va.device UpperCamelCase :List[Any] = va.cpu().numpy() UpperCamelCase :str = va.cpu().numpy() UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) ) if np.abs(SCREAMING_SNAKE_CASE__ ) > 
DOT_THRESHOLD: UpperCamelCase :Any = (1 - t) * va + t * va else: UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Union[str, Any] = theta_a * t UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a UpperCamelCase :Union[str, Any] = sa * va + sa * va if inputs_are_torch: UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) return va def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ): UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 ) UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ): for param in model.parameters(): UpperCamelCase :Any = value class UpperCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str: super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase :Union[str, Any] = ( feature_extractor.size if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ ) else feature_extractor.size['''shortest_edge'''] ) UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , 
std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ ) set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> int: self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> int: set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> str: set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: # get the original timestep using init_timestep UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 ) UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int: if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' ) UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :int = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in 
range(SCREAMING_SNAKE_CASE_ ) ] UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) else: UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCamelCase :List[str] = 0.1_8215 * init_latents UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) # get latents UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = init_latents return latents def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half() UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def UpperCAmelCase ( self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]: UpperCamelCase :List[str] = latents.detach().requires_grad_() UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep] UpperCamelCase :Optional[int] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :str = self.scheduler.sigmas[index] UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred else: raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCamelCase :int = 1 / 0.1_8215 * sample UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype ) UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , 
keepdim=SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0] if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ): UpperCamelCase :Dict = latents.detach() + grads * (sigma**2) UpperCamelCase :Optional[Any] = noise_pred_original else: UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size: raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1: UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1) UpperCamelCase :Tuple = [ ('''model''', self.coca_model is None), ('''tokenizer''', self.coca_tokenizer is None), ('''transform''', self.coca_transform is None), ] UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]] UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ ) # generate prompts with coca model if 
prompt is None if content_prompt is None: if len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ ) if style_prompt is None: if len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ ) # get prompt text embeddings for content and style UpperCamelCase :List[Any] = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] UpperCamelCase :List[Any] = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # duplicate text embeddings for each generation per prompt UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) # set timesteps UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) UpperCamelCase :List[str] = {} if accepts_offset: UpperCamelCase :Tuple = 1 self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) UpperCamelCase , UpperCamelCase :Tuple = 
self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device ) UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ ) # Preprocess image UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = self.prepare_latents( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = self.prepare_latents( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if clip_guidance_scale > 0: UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :Optional[Any] = slerp( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCamelCase :Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase :Any = content_text_input.input_ids.shape[-1] UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ) UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase :int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCamelCase :str = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase :Dict = {} if accepts_eta: UpperCamelCase :int = eta # check if the scheduler accepts generator UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: UpperCamelCase :List[str] = generator with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ): for i, t in enumerate(SCREAMING_SNAKE_CASE_ ): # expand the latents if we are doing classifier free guidance UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform classifier free guidance if do_classifier_free_guidance: UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 ) UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: UpperCamelCase :int = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) UpperCamelCase , UpperCamelCase :str = self.cond_fn( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor UpperCamelCase :List[Any] = 1 / 0.1_8215 * 
latents UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
658
1
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def _A ( SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :int = int(number**0.5 ) return number == sq * sq def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den UpperCamelCase :int = x_den * y_den * z_den UpperCamelCase :int = gcd(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) top //= hcf bottom //= hcf return top, bottom def _A ( SCREAMING_SNAKE_CASE__ : int = 35 ): UpperCamelCase :set = set() UpperCamelCase :int UpperCamelCase :Fraction = Fraction(0 ) UpperCamelCase :tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 UpperCamelCase :Tuple = x_num * y_den + x_den * y_num UpperCamelCase :Tuple = x_den * y_den UpperCamelCase :List[str] = gcd(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase :str = add_three( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) unique_s.add(SCREAMING_SNAKE_CASE__ ) # n=2 UpperCamelCase :str = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) UpperCamelCase :Dict = x_den * x_den * y_den * y_den if is_sq(SCREAMING_SNAKE_CASE__ ) and is_sq(SCREAMING_SNAKE_CASE__ ): UpperCamelCase :int = int(sqrt(SCREAMING_SNAKE_CASE__ ) ) UpperCamelCase :Tuple = int(sqrt(SCREAMING_SNAKE_CASE__ ) ) UpperCamelCase :Dict = gcd(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase :Tuple = add_three( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) unique_s.add(SCREAMING_SNAKE_CASE__ ) # n=-1 UpperCamelCase :Dict = x_num * y_num UpperCamelCase :str = x_den * y_num + x_num * y_den UpperCamelCase :List[str] = gcd(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase :int = add_three( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) unique_s.add(SCREAMING_SNAKE_CASE__ ) # n=2 UpperCamelCase :Any = x_num * x_num * y_num * y_num UpperCamelCase :Dict = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(SCREAMING_SNAKE_CASE__ ) and is_sq(SCREAMING_SNAKE_CASE__ ): UpperCamelCase :int = int(sqrt(SCREAMING_SNAKE_CASE__ ) ) UpperCamelCase :List[str] = int(sqrt(SCREAMING_SNAKE_CASE__ ) ) UpperCamelCase :Optional[int] = gcd(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: UpperCamelCase :Optional[int] = add_three( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) unique_s.add(SCREAMING_SNAKE_CASE__ ) for num, den in unique_s: total += Fraction(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
658
from __future__ import annotations def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :list[list[int]] = [] UpperCamelCase :list[int] = [] UpperCamelCase :List[str] = 0 UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ ) create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return result def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ): if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum: return if sum(SCREAMING_SNAKE_CASE__ ) == max_sum: result.append(SCREAMING_SNAKE_CASE__ ) return for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ): create_state_space_tree( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , ) __snake_case = [3, 34, 4, 12, 5, 2] __snake_case = 9 __snake_case = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
658
1
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-SW3 (sentencepiece model with byte fallback)."""

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Round-trip a token through `_convert_token_to_id` / `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
658
def _A ( SCREAMING_SNAKE_CASE__ : int ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise ValueError('''check_bouncy() accepts only integer arguments''' ) UpperCamelCase :int = str(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Optional[Any] = ''''''.join(sorted(SCREAMING_SNAKE_CASE__ ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def _A ( SCREAMING_SNAKE_CASE__ : float = 99 ): if not 0 < percent < 100: raise ValueError('''solution() only accepts values from 0 to 100''' ) UpperCamelCase :Tuple = 0 UpperCamelCase :str = 1 while True: if check_bouncy(SCREAMING_SNAKE_CASE__ ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
658
1
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    """Convert a GPTSAN Mesh-TensorFlow checkpoint to a PyTorch state dict.

    Reads the checkpoint in ``args.tf_model_dir`` and writes a ``.pt`` file
    to ``args.output``, renaming each TF variable to its HF PyTorch name.
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            # NOTE(review): dtype restored as float16 — confirm against the
            # original conversion script.
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # Optimizer slots are not part of the model weights.
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # Token embeddings are tied to the LM head.
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
658
def _A ( SCREAMING_SNAKE_CASE__ : str ): UpperCamelCase :Union[str, Any] = hex_num.strip() if not hex_num: raise ValueError('''No value was passed to the function''' ) UpperCamelCase :str = hex_num[0] == '''-''' if is_negative: UpperCamelCase :Union[str, Any] = hex_num[1:] try: UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ , 16 ) except ValueError: raise ValueError('''Invalid value was passed to the function''' ) UpperCamelCase :Dict = '''''' while int_num > 0: UpperCamelCase :Tuple = str(int_num % 2 ) + bin_str int_num >>= 1 return int(('''-''' + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
658
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """WavLMForAudioFrameClassification""", """WavLMForCTC""", """WavLMForSequenceClassification""", """WavLMForXVector""", """WavLMModel""", """WavLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
658
from __future__ import annotations def _A ( SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase , UpperCamelCase :List[Any] = position UpperCamelCase :Any = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] UpperCamelCase :Dict = [] for position in positions: UpperCamelCase , UpperCamelCase :str = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(SCREAMING_SNAKE_CASE__ ) return permissible_positions def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ): if is_complete(SCREAMING_SNAKE_CASE__ ): return True for position in get_valid_pos(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ): UpperCamelCase , UpperCamelCase :Optional[int] = position if board[y][x] == 0: UpperCamelCase :Any = curr + 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , curr + 1 ): return True UpperCamelCase :Union[str, Any] = 0 return False def _A ( SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :List[Any] = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )] for i in range(SCREAMING_SNAKE_CASE__ ): for j in range(SCREAMING_SNAKE_CASE__ ): UpperCamelCase :Tuple = 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , (i, j) , 1 ): return board UpperCamelCase :str = 0 UpperCamelCase :List[Any] = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod()
658
1
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """CLAP feature extractor: log-mel spectrograms with "fusion" or
    "rand_trunc" truncation and "repeat"/"repeatpad" padding strategies."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of positive-frequency bins of an RFFT of this window size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-style and Slaney-style (used per truncation mode).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance, dropping the large (recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        """Compute a (frames, mel_bins) log-mel (dB) spectrogram of *waveform*."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a shrunk global view with random front/middle/back chunks (4, chunk_frames, 64)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Truncate or pad *waveform* to *max_length* samples and return (mel, longer)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature
        with "input_features" and "is_longer"."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # NOTE(review): dtypes restored as float64 -> float32 per the usual HF
        # feature-extractor pattern — confirm against the original file.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    """Unit tests for `GenerationConfig` (de)serialization, defaults, and updates."""

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        """Round-trip a config through save_pretrained/from_pretrained, with and
        without a custom ``config_name``."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        """A generation config built from a model config inherits its
        generation-relevant fields (e.g. ``eos_token_id``)."""
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        """``update`` applies known attributes, leaves the input dict untouched,
        and returns the kwargs it could not use."""
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        """Ad-hoc attributes survive a save/load round trip, but are not copied
        when deriving a new config via ``from_model_config``."""
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        """Constructor kwargs override defaults; ``from_pretrained`` kwargs
        override saved values."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging-only tests for pushing a `GenerationConfig` to the Hub (user and
    organization namespaces)."""

    @classmethod
    def setUpClass(cls):
        # Authenticate the whole class against the staging endpoint.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Push to a user namespace via ``push_to_hub`` and via
        ``save_pretrained(push_to_hub=True)``; both must round-trip."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            # The version stamp is written at save time and may legitimately differ.
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Same as ``test_push_to_hub`` but targeting an organization repo."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
658
1