code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from __future__ import annotations class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = data lowerCAmelCase__ : Any = None lowerCAmelCase__ : Any = None def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): # In Order traversal of the tree """simple docstring""" if tree: display(tree.left ) print(tree.data ) display(tree.right ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def _SCREAMING_SNAKE_CASE ( ): # Main function for testing. """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Node(1 ) lowerCAmelCase__ : Union[str, Any] = Node(2 ) lowerCAmelCase__ : Optional[int] = Node(3 ) lowerCAmelCase__ : List[Any] = Node(4 ) lowerCAmelCase__ : Any = Node(5 ) lowerCAmelCase__ : Optional[Any] = Node(6 ) lowerCAmelCase__ : Optional[Any] = Node(7 ) lowerCAmelCase__ : Tuple = Node(8 ) lowerCAmelCase__ : Optional[Any] = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print("""Tree is: """ ) display(__snake_case ) if __name__ == "__main__": main()
565
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
0
"""simple docstring""" import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" __magic_name__ :Tuple = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __magic_name__ :Optional[int] = 'CIDAS/clipseg-rd64-refined' __magic_name__ :Dict = 'image_segmenter' __magic_name__ :Optional[Any] = CLIPSegForImageSegmentation __magic_name__ :Dict = ['image', 'text'] __magic_name__ :Optional[Any] = ['image'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' requires_backends(self , ['vision'] ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=__UpperCAmelCase , return_tensors='pt' ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' with torch.no_grad(): lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase ).logits return logits def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = outputs.cpu().detach().numpy() lowerCAmelCase__ :List[Any] = 0 lowerCAmelCase__ :List[str] = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
93
'''simple docstring''' import functools def a_ ( __snake_case : str , __snake_case : str ) -> int: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) @functools.cache def min_distance(__snake_case : int , __snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
676
0
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger SCREAMING_SNAKE_CASE__ : int = get_logger(__name__) class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ = None ) -> Tuple: lowerCamelCase : str = ( os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) lowerCamelCase : int = Extractor def _lowercase ( self , UpperCamelCase__ ) -> List[Any]: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" lowerCamelCase : Dict = os.path.abspath(UpperCamelCase__ ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: return force_extract or ( not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ )) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> List[str]: lowerCamelCase : str = self.extractor.infer_extractor_format(UpperCamelCase__ ) if not extractor_format: return input_path lowerCamelCase : Any = self._get_output_path(UpperCamelCase__ ) if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ): self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return output_path class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' @classmethod @abstractmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> str: ... @staticmethod @abstractmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> Any: ... 
class UpperCamelCase__ (lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : List[bytes] = [] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> str: with open(UpperCamelCase__ , "rb" ) as f: return f.read(UpperCamelCase__ ) @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ = b"" ) -> Dict: if not magic_number: lowerCamelCase : int = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) try: lowerCamelCase : Tuple = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) except OSError: return False return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]: return tarfile.is_tarfile(UpperCamelCase__ ) @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: def resolved(UpperCamelCase__ ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase__ ) ) def badpath(UpperCamelCase__ , UpperCamelCase__ ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ ) def badlink(UpperCamelCase__ , UpperCamelCase__ ) -> bool: # Links are interpreted relative to the directory containing the link lowerCamelCase : Optional[Any] = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase__ ) lowerCamelCase : Any = resolved(UpperCamelCase__ ) for finfo in members: if badpath(finfo.name , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ 
): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) lowerCamelCase : str = tarfile.open(UpperCamelCase__ ) tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) ) tar_file.close() class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = [b'\x1F\x8B'] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: with gzip.open(UpperCamelCase__ , "rb" ) as gzip_file: with open(UpperCamelCase__ , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = [ b'PK\x03\x04', b'PK\x05\x06', # empty archive b'PK\x07\x08', # spanned archive ] @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ = b"" ) -> Union[str, Any]: if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase__ , "rb" ) as fp: lowerCamelCase : Any = _EndRecData(UpperCamelCase__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: lowerCamelCase : Optional[Any] = fp.read(UpperCamelCase__ ) # CD is where we expect it to be if len(UpperCamelCase__ ) == sizeCentralDir: lowerCamelCase : int = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with zipfile.ZipFile(UpperCamelCase__ , "r" ) as zip_file: zip_file.extractall(UpperCamelCase__ ) zip_file.close() class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : Tuple = [b'\xFD\x37\x7A\x58\x5A\x00'] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: with lzma.open(UpperCamelCase__ ) as compressed_file: with open(UpperCamelCase__ , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : str = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID @staticmethod def _lowercase ( UpperCamelCase__ , 
UpperCamelCase__ ) -> Dict: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) lowerCamelCase : Dict = rarfile.RarFile(UpperCamelCase__ ) rf.extractall(UpperCamelCase__ ) rf.close() class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : Tuple = [b'\x28\xb5\x2F\xFD'] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd lowerCamelCase : int = zstd.ZstdDecompressor() with open(UpperCamelCase__ , "rb" ) as ifh, open(UpperCamelCase__ , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ ) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = [b'\x42\x5A\x68'] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: with bza.open(UpperCamelCase__ , "rb" ) as compressed_file: with open(UpperCamelCase__ , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : Any = [b'\x37\x7A\xBC\xAF\x27\x1C'] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with pyazr.SevenZipFile(UpperCamelCase__ , "r" ) as archive: archive.extractall(UpperCamelCase__ ) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = [b'\x04\x22\x4D\x18'] @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: if not config.LZ4_AVAILABLE: raise ImportError("Please pip install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase__ , "rb" ) as compressed_file: with open(UpperCamelCase__ , "wb" ) as 
extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCamelCase__ : '''simple docstring''' lowerCamelCase_ : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def _lowercase ( cls ) -> List[Any]: return max( len(UpperCamelCase__ ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase__ , UpperCamelCase__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ ) except OSError: return b"" @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ = False ) -> List[Any]: warnings.warn( "Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use \'infer_extractor_format\' instead." 
, category=UpperCamelCase__ , ) lowerCamelCase : str = cls.infer_extractor_format(UpperCamelCase__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def _lowercase ( cls , UpperCamelCase__ ) -> List[str]: # <Added version="2.4.0"/> lowerCamelCase : List[str] = cls._get_magic_number_max_length() lowerCamelCase : Optional[Any] = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return extractor_format @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = "deprecated" , ) -> List[Any]: os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ ) # Prevent parallel extractions lowerCamelCase : Optional[int] = str(Path(UpperCamelCase__ ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase__ ): shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg warnings.warn( "Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use \'extractor_format\' instead." , category=UpperCamelCase__ , ) lowerCamelCase : int = extractor if extractor != "deprecated" else extractor_format else: lowerCamelCase : str = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase__ , UpperCamelCase__ ) else: warnings.warn( "Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase__ ): return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
311
'''simple docstring''' def a_ ( __snake_case : int ) -> bool: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if number < 0: return False lowerCamelCase_ =number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
676
0
'''simple docstring''' from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
370
'''simple docstring''' from __future__ import annotations a_ : int = list[list[int]] # assigning initial values to the grid a_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution a_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( __snake_case : Matrix ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__snake_case ): lowerCamelCase_, lowerCamelCase_ =location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ): lowerCamelCase_ =digit if sudoku(__snake_case ) is not None: return grid lowerCamelCase_ =0 return None def a_ ( __snake_case : Matrix ) -> None: """simple docstring""" for row in grid: for cell in row: print(__snake_case , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") a_ : Union[str, Any] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
676
0
from typing import TYPE_CHECKING from ...utils import _LazyModule __A = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
484
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Tuple = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] ='informer' lowercase : Union[str, Any] ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =prediction_length lowerCamelCase_ =context_length or prediction_length lowerCamelCase_ =distribution_output lowerCamelCase_ =loss lowerCamelCase_ =input_size lowerCamelCase_ =num_time_features lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowerCamelCase_ =scaling lowerCamelCase_ =num_dynamic_real_features lowerCamelCase_ =num_static_real_features lowerCamelCase_ =num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if 
len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =cardinality else: lowerCamelCase_ =[0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =embedding_dimension else: lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase_ =num_parallel_samples # Transformer architecture configuration lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features lowerCamelCase_ =d_model lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =decoder_layers lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =use_cache # Informer lowerCamelCase_ =attention_type lowerCamelCase_ =sampling_factor lowerCamelCase_ =distil super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
676
0
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __A ( lowerCamelCase__ ): '''simple docstring''' lowerCAmelCase : int = 0 lowerCAmelCase : bool = False lowerCAmelCase : float = 3.0 class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.assertDictEqual(MockClass().to_kwargs() ,{} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() ,{'''a''': 2} ) self.assertDictEqual(MockClass(a=2 ,b=_snake_case ).to_kwargs() ,{'''a''': 2, '''b''': True} ) self.assertDictEqual(MockClass(a=2 ,c=2.25 ).to_kwargs() ,{'''a''': 2, '''c''': 2.25} ) @require_cuda def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[int] = GradScalerKwargs(init_scale=1_024 ,growth_factor=2 ) AcceleratorState._reset_state() lowercase__ : Union[str, Any] = Accelerator(mixed_precision='''fp16''' ,kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) lowercase__ : int = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale ,1_024.0 ) self.assertEqual(scaler._growth_factor ,2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor ,0.5 ) self.assertEqual(scaler._growth_interval ,2_000 ) self.assertEqual(scaler._enabled ,_snake_case ) @require_multi_gpu def UpperCAmelCase ( self : Tuple ) -> int: """simple docstring""" lowercase__ : List[str] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(_snake_case ,env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase_ = 
DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase_ = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase_ = torch.nn.Linear(100, 200) lowerCAmelCase_ = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase_ = """""" lowerCAmelCase_ = model.bucket_bytes_cap // (1_024 * 1_024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
560
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
0
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Multiples of 3 become "Fizz", multiples of 5 become "Buzz", multiples of
    both become "FizzBuzz"; every entry is followed by a single space.

    (Restored from name-mangled source: both parameters were named ``A_``,
    which is a SyntaxError, and the body referenced undefined ``__snake_case``.)

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '

    Raises:
        ValueError: if ``iterations`` is not an int, if ``number`` is not a
            positive int, or if ``iterations`` < 1.
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0"
        )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
221
"""DDIM unconditional image-generation pipeline.

Restored from name-mangled source: the class and its base were both obfuscated
(``__UpperCamelCase(lowerCamelCase__)``) even though ``DiffusionPipeline`` is
imported; ``isinstance(..., lowerCAmelCase)`` is restored to ``int``/``list``.
"""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with DDIM sampling.

    Parameters:
        unet: the U-Net model used to denoise the encoded image latents.
        scheduler: scheduler used with ``unet``; always converted to DDIM.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images; returns an ImagePipelineOutput
        (or a plain tuple when ``return_dict`` is False)."""
        # sample_size may be a single int (square images) or a (h, w) pair
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
0
"""IndicGLUE benchmark metric.

Restored from name-mangled source: all three helpers were named ``snake_case``
(with parameters shadowing the function name) while the Metric class called
``simple_accuracy`` / ``acc_and_fa`` / ``precision_at_aa``; the mangled
``fa_score`` import is restored to ``f1_score``.
"""
import datasets
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and
covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the references."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy together with the (binary) F1 score."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval (cosine distance)."""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    # a query counts as correct when its true mate appears in its 10 nearest neighbours
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
284
"""Twin prime helper (https://en.wikipedia.org/wiki/Twin_prime).

Restored from name-mangled source: the function was named ``a_`` and its
isinstance check read ``isinstance(__snake_case, __snake_case)``.
"""
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return ``number + 2`` when both ``number`` and ``number + 2`` are prime,
    otherwise ``-1``.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Energy-unit conversion table and converter.

Restored from name-mangled source: the converter had two parameters both named
``_A`` (a SyntaxError) and the table was bound to ``UpperCamelCase__`` while
the body read ``ENERGY_CONVERSION``.
"""
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from ``from_type`` to ``to_type`` via joules.

    Raises:
        ValueError: if either unit name is not a key of ENERGY_CONVERSION.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
614
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
# Restored from name-mangled source: the output dataclass, scheduler class, bases
# and every method name were obfuscated; canonical diffusers names are used here.
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the VE-SDE predictor step.

    Attributes:
        prev_sample: computed sample x_{t-1} for the previous timestep.
        prev_sample_mean: the mean of prev_sample before the noise was added.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler (predictor + Langevin corrector)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # VE-SDE requires no input scaling; kept for scheduler API compatibility.
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Set the continuous timesteps (from 1 down to sampling_eps)."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the noise scales used for the diffusion chain."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # sigma at the previous discrete index; index 0 maps to sigma = 0
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predict the sample at the previous timestep (reverse-SDE predictor)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Correct the predicted sample based on the model's score (Langevin corrector)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Add sigma-scaled noise at the given discrete timesteps (training)."""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
676
0
"""Pandas (pickled DataFrame) dataset builder.

Restored from name-mangled source: both classes were named ``UpperCAmelCase``
and every method ``__lowerCamelCase`` (later defs silently overwrote earlier
ones); canonical `datasets` builder names are used here.
"""
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
126
"""FizzBuzz as a pure function.

Restored from name-mangled source: both parameters were named ``__snake_case``
(a SyntaxError — duplicate argument names).
"""


def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Multiples of 3 become "Fizz", multiples of 5 become "Buzz", multiples of
    both become "FizzBuzz"; every entry is followed by a single space.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '

    Raises:
        ValueError: if ``iterations`` is not an int, if ``number`` is not a
            positive int, or if ``iterations`` < 1.
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0"
        )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""simple docstring""" from string import ascii_uppercase __magic_name__ = {char: i for i, char in enumerate(ascii_uppercase)} __magic_name__ = dict(enumerate(ascii_uppercase)) def _A ( __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = len(__snake_case ) lowerCamelCase__ = 0 while True: if x == i: lowerCamelCase__ = 0 if len(__snake_case ) == len(__snake_case ): break key += key[i] i += 1 return key def _A ( __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = """""" lowerCamelCase__ = 0 for letter in message: if letter == " ": cipher_text += " " else: lowerCamelCase__ = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _A ( __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = """""" lowerCamelCase__ = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: lowerCamelCase__ = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _A ( ): """simple docstring""" lowerCamelCase__ = """THE GERMAN ATTACK""" lowerCamelCase__ = """SECRET""" lowerCamelCase__ = generate_key(__snake_case , __snake_case ) lowerCamelCase__ = cipher_text(__snake_case , __snake_case ) print(f"""Encrypted Text = {s}""" ) print(f"""Original Text = {original_text(__snake_case , __snake_case )}""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
129
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
0
"""F1 metric wrapper around scikit-learn.

Restored from name-mangled source: the three module-level strings were all
assigned to ``_lowerCAmelCase`` while the decorator and MetricInfo read
``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` / ``_CITATION``; the mangled
``fa_score`` import is restored to ``f1_score``.
"""
import datasets
from sklearn.metrics import f1_score

_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the
        labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a
        multiclass average ignoring a majority negative class. Labels not present in the data will result in 0
        components in a macro average. For multilabel targets, labels are column indices. By default, all labels in
        `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`.
        Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for
        each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to
        `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes
            found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false
            positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label
            imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of
            true instances for each label). This alters 'macro' to account for label imbalance. This option can result
            in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel
            classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`.
        Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
565
"""Preprocess a corpus once (tokenization + token->id) and dump the pickled
token-id arrays, so distillation training doesn't redo the work.

Restored from name-mangled source: ``main`` was named ``a_``, locals were
mangled, and the numpy dtypes ``np.uintaa``/``np.intaa`` are restored to
``np.uint16``/``np.int32``; the ``iter`` counter is renamed so it no longer
shadows the builtin.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0
    interval = 10000
    start = time.time()
    for text in data:
        # wrap each line with the tokenizer's bos/sep tokens by hand, then
        # encode without letting the tokenizer add special tokens again
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when the vocabulary fits in 16 bits — halves the dump size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
676
0
"""simple docstring""" from math import isqrt def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(__snake_case ) + 1 ) ) def __A (_SCREAMING_SNAKE_CASE = 10**6 ) ->int: """simple docstring""" lowerCAmelCase__ :List[Any] = 0 lowerCAmelCase__ :Dict = 1 lowerCAmelCase__ :List[str] = 7 while prime_candidate < max_prime: primes_count += is_prime(__snake_case ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
93
"""MVP model configuration.

Restored from name-mangled source: the class and its base were obfuscated
(``__UpperCamelCase(lowerCamelCase__)``) and all 28 ``__init__`` parameters
were named ``lowerCAmelCase``; canonical transformers names are used here.
"""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration for an MVP encoder-decoder model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # legacy kwarg: translate force_bos_token_to_be_generated into forced_bos_token_id
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
0
from __future__ import annotations import math from collections.abc import Callable def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 100 ,) -> float: lowerCamelCase : List[Any] = x_start lowerCamelCase : List[str] = fnc(__snake_case ) lowerCamelCase : Any = 0.0 for _ in range(__snake_case ): # Approximates curve as a sequence of linear lines and sums their length lowerCamelCase : Tuple = (x_end - x_start) / steps + xa lowerCamelCase : List[str] = fnc(__snake_case ) length += math.hypot(xa - xa ,fxa - fxa ) # Increment step lowerCamelCase : Optional[int] = xa lowerCamelCase : List[Any] = fxa return length if __name__ == "__main__": def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return math.sin(10 * x ) print('f(x) = sin(10 * x)') print('The length of the curve from x = -10 to x = 10 is:') SCREAMING_SNAKE_CASE__ : Tuple = 10 while i <= 100000: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
311
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : str = {"""vocab_file""": """spiece.model"""} a_ : Optional[int] = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } a_ : List[Any] = {"""bert_for_seq_generation""": 5_12} class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[int] =[] lowercase : str =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, ) lowerCamelCase_ =vocab_file lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.sp_model.get_piece_size() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =None return state def __setstate__( self, lowerCAmelCase ): """simple docstring""" 
lowerCamelCase_ =d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): lowerCamelCase_ ={} lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase ) return token def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ ='''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token lowerCamelCase_ =[] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase, '''wb''' ) as fi: lowerCamelCase_ =self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
676
0
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def snake_case__ ( _A: Optional[int] , _A: Union[str, Any] , _A: Any ) -> str: '''simple docstring''' lowerCAmelCase = BertConfig.from_json_file(__snake_case ) print(f"Building PyTorch model from configuration: {config}" ) lowerCAmelCase = BertForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __lowercase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
370
'''simple docstring''' from collections.abc import Sequence def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" return sum(c * (x**i) for i, c in enumerate(__snake_case ) ) def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" lowerCamelCase_ =0.0 for coeff in reversed(__snake_case ): lowerCamelCase_ =result * x + coeff return result if __name__ == "__main__": a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0) a_ : Tuple = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
676
0
import numpy as np def __A ( _lowercase ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __A ( _lowercase ): '''simple docstring''' return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
484
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =['image_processor', 'tokenizer'] lowercase : str ='CLIPImageProcessor' lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. 
Both cannot be none.''' ) if text is not None: lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase ) if images is not None: lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase ) if text is not None and images is not None: lowerCamelCase_ =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tokenizer.model_input_names lowerCamelCase_ =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
676
0
"""simple docstring""" from collections.abc import Sequence def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> float: return sum(c * (x**i) for i, c in enumerate(__snake_case ) ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> float: lowercase__ : int = 0.0 for coeff in reversed(__snake_case ): lowercase__ : List[str] = result * x + coeff return result if __name__ == "__main__": lowerCAmelCase_ = (0.0, 0.0, 5.0, 9.3, 7.0) lowerCAmelCase_ = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
560
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING a_ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" super().__init__(*lowerCAmelCase, **lowerCAmelCase ) requires_backends(self, '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ ={} lowerCamelCase_ ={} if prompt is not None: lowerCamelCase_ =prompt if generate_kwargs is not None: lowerCamelCase_ =generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowerCamelCase_ ={} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) lowerCamelCase_ =max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return super().__call__(lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =load_image(lowerCAmelCase ) if prompt is not None: if not isinstance(lowerCAmelCase, lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, 
got - {type(lowerCAmelCase )} - but expected a single string. ''' '''Note also that one single text can be provided for conditional image to text generation.''' ) lowerCamelCase_ =self.model.config.model_type if model_type == "git": lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework ) lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework ) lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework ) model_inputs.update(lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowerCamelCase_ =None return model_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''], lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): lowerCamelCase_ =None if generate_kwargs is None: lowerCamelCase_ ={} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. 
In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowerCamelCase_ =model_inputs.pop(self.model.main_input_name ) lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase ) return model_outputs def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for output_ids in model_outputs: lowerCamelCase_ ={ '''generated_text''': self.tokenizer.decode( lowerCAmelCase, skip_special_tokens=lowerCAmelCase, ) } records.append(lowerCAmelCase ) return records
676
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class UpperCAmelCase__ ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ : torch.FloatTensor class UpperCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" @register_to_config def __init__( self: List[str] , __lowerCAmelCase: int = 3 , __lowerCAmelCase: List[Any] = 3 , __lowerCAmelCase: Tuple = ("DownEncoderBlock2D",) , __lowerCAmelCase: Optional[int] = ("UpDecoderBlock2D",) , __lowerCAmelCase: Optional[int] = (64,) , __lowerCAmelCase: Union[str, Any] = 1 , __lowerCAmelCase: Tuple = "silu" , __lowerCAmelCase: Dict = 3 , __lowerCAmelCase: List[str] = 32 , __lowerCAmelCase: Union[str, Any] = 256 , __lowerCAmelCase: Optional[Any] = 32 , __lowerCAmelCase: str = None , __lowerCAmelCase: Optional[int] = 0.18215 , __lowerCAmelCase: Optional[Any] = "group" , ) -> Dict: '''simple docstring''' super().__init__() # pass init params to Encoder __UpperCAmelCase = Encoder( in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , down_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , double_z=__lowerCAmelCase , ) __UpperCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels __UpperCAmelCase = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 ) __UpperCAmelCase = VectorQuantizer(__lowerCAmelCase , __lowerCAmelCase , beta=0.25 , remap=__lowerCAmelCase , sane_index_shape=__lowerCAmelCase ) __UpperCAmelCase = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , 1 ) # pass init params to Decoder __UpperCAmelCase = Decoder( in_channels=__lowerCAmelCase , 
out_channels=__lowerCAmelCase , up_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , norm_type=__lowerCAmelCase , ) @apply_forward_hook def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: List[str] , __lowerCAmelCase: int = True ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase = self.encoder(__lowerCAmelCase ) __UpperCAmelCase = self.quant_conv(__lowerCAmelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=__lowerCAmelCase ) @apply_forward_hook def _UpperCAmelCase ( self: int , __lowerCAmelCase: List[str] , __lowerCAmelCase: Any = False , __lowerCAmelCase: Tuple = True ) -> List[Any]: '''simple docstring''' if not force_not_quantize: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.quantize(__lowerCAmelCase ) else: __UpperCAmelCase = h __UpperCAmelCase = self.post_quant_conv(__lowerCAmelCase ) __UpperCAmelCase = self.decoder(__lowerCAmelCase , quant if self.config.norm_type == "spatial" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__lowerCAmelCase ) def _UpperCAmelCase ( self: Union[str, Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Optional[Any] = True ) -> Any: '''simple docstring''' __UpperCAmelCase = sample __UpperCAmelCase = self.encode(__lowerCAmelCase ).latents __UpperCAmelCase = self.decode(__lowerCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__lowerCAmelCase )
221
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str: """simple docstring""" # Initialise PyTorch model lowerCamelCase_ =BertConfig.from_json_file(__snake_case ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase_ =BertForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase : Tuple = logging.get_logger(__name__) _UpperCamelCase : int = { """microsoft/unispeech-sat-base-100h-libri-ft""": ( """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json""" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class _snake_case ( lowerCamelCase__ ): SCREAMING_SNAKE_CASE : List[Any] = 'unispeech-sat' def __init__( self , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE="group" , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , _SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1_28 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=3_20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1_00 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="mean" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=(5_12, 5_12, 5_12, 5_12, 15_00) , _SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , _SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , 
_SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5_04 , **_SCREAMING_SNAKE_CASE , ): '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = hidden_size lowerCAmelCase = feat_extract_norm lowerCAmelCase = feat_extract_activation lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = conv_bias lowerCAmelCase = num_conv_pos_embeddings lowerCAmelCase = num_conv_pos_embedding_groups lowerCAmelCase = len(self.conv_dim ) lowerCAmelCase = num_hidden_layers lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = feat_proj_dropout lowerCAmelCase = final_dropout lowerCAmelCase = layerdrop lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range lowerCAmelCase = vocab_size lowerCAmelCase = num_clusters lowerCAmelCase = do_stable_layer_norm lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase = apply_spec_augment lowerCAmelCase = mask_time_prob lowerCAmelCase = mask_time_length lowerCAmelCase = mask_time_min_masks lowerCAmelCase = mask_feature_prob lowerCAmelCase = mask_feature_length lowerCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCAmelCase = num_codevectors_per_group lowerCAmelCase = num_codevector_groups lowerCAmelCase = contrastive_logits_temperature lowerCAmelCase = feat_quantizer_dropout lowerCAmelCase = num_negatives lowerCAmelCase = codevector_dim lowerCAmelCase = proj_codevector_dim lowerCAmelCase = diversity_loss_weight # ctc loss lowerCAmelCase = ctc_loss_reduction lowerCAmelCase = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = list(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = xvector_output_dim @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
284
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Optional[int] = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='altclip_text_model' def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ): """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_act lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =initializer_range lowerCamelCase_ =initializer_factor lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =position_embedding_type lowerCamelCase_ =use_cache lowerCamelCase_ =project_dim class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Dict ='altclip_vision_model' def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, 
**lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_size lowerCamelCase_ =intermediate_size lowerCamelCase_ =projection_dim lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =num_channels lowerCamelCase_ =patch_size lowerCamelCase_ =image_size lowerCamelCase_ =initializer_range lowerCamelCase_ =initializer_factor lowerCamelCase_ =attention_dropout lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =hidden_act @classmethod def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": lowerCamelCase_ =config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase, **lowerCAmelCase ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Dict ='altclip' lowercase : str =True def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase ) super().__init__(**lowerCAmelCase ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: lowerCamelCase_ ={} # This is the complete result when using `text_config_dict`. lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: lowerCamelCase_ =( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase_ =( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: lowerCamelCase_ ={} # This is the complete result when using `vision_config_dict`. lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: lowerCamelCase_ ={ str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. 
for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: lowerCamelCase_ =( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase_ =( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: lowerCamelCase_ ={} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: lowerCamelCase_ ={} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' ) lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ) lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ) lowerCamelCase_ =projection_dim lowerCamelCase_ =logit_scale_init_value lowerCamelCase_ =1.0 @classmethod def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(self.__dict__ ) lowerCamelCase_ =self.text_config.to_dict() lowerCamelCase_ =self.vision_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
676
0
'''simple docstring''' def __UpperCamelCase( _A : list[list[float]] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [] for data in source_data: for i, el in enumerate(__snake_case ): if len(__snake_case ) < i + 1: data_lists.append([] ) data_lists[i].append(float(__snake_case ) ) return data_lists def __UpperCamelCase( _A : list[list[float]] , _A : list[int] ): '''simple docstring''' UpperCAmelCase__ : str = [] for dlist, weight in zip(__snake_case , __snake_case ): UpperCAmelCase__ : Tuple = min(__snake_case ) UpperCAmelCase__ : Optional[Any] = max(__snake_case ) UpperCAmelCase__ : Any = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: UpperCAmelCase__ : List[Any] = F'''Invalid weight of {weight:f} provided''' raise ValueError(__snake_case ) score_lists.append(__snake_case ) return score_lists def __UpperCamelCase( _A : list[list[float]] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(__snake_case ): UpperCAmelCase__ : Any = final_scores[j] + ele return final_scores def __UpperCamelCase( _A : list[list[float]] , _A : list[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = get_data(__snake_case ) UpperCAmelCase__ : int = calculate_each_score(__snake_case , __snake_case ) UpperCAmelCase__ : int = generate_final_scores(__snake_case ) # append scores to source data for i, ele in enumerate(__snake_case ): source_data[i].append(__snake_case ) return source_data
614
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_lengths lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =gelu_activation lowerCamelCase_ =sinusoidal_embeddings lowerCamelCase_ =causal lowerCamelCase_ =asm lowerCamelCase_ =n_langs lowerCamelCase_ =vocab_size lowerCamelCase_ =n_special lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ 
=attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =summary_type lowerCamelCase_ =use_proj lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_input_lengths: lowerCamelCase_ =( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float() lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def lowercase__ ( self, 
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) 
model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ 
=FlaubertForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase : Tuple =( { 'feature-extraction': 
FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase ) def lowercase__ ( self 
): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=lowerCAmelCase ) lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =torch.jit.trace( lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) ) lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
0
'''simple docstring''' from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class UpperCAmelCase ( lowerCamelCase__ ): def __lowerCamelCase ( self ): return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def __lowerCamelCase ( self ): __UpperCAmelCase = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} return Dataset.from_dict(__A ) def __lowerCamelCase ( self ): __UpperCAmelCase = self._create_example_records() __UpperCAmelCase = Dataset.from_list(__A ) self.assertListEqual(dset.column_names , ['col_1', 'col_2'] ) for i, r in enumerate(__A ): self.assertDictEqual(__A , example_records[i] ) def __lowerCamelCase ( self ): __UpperCAmelCase = self._create_example_records() __UpperCAmelCase = Dataset.from_list(__A ) __UpperCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def __lowerCamelCase ( self ): # checks what happens with missing columns __UpperCAmelCase = [{'col_1': 1}, {'col_2': 'x'}] __UpperCAmelCase = Dataset.from_list(__A ) self.assertDictEqual(dset[0] , {'col_1': 1} ) self.assertDictEqual(dset[1] , {'col_1': None} ) # NB: first record is used for columns def __lowerCamelCase ( self ): # checks if the type can be inferred from the second record __UpperCAmelCase = [{'col_1': []}, {'col_1': [1, 2]}] __UpperCAmelCase = Dataset.from_list(__A ) self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) ) def __lowerCamelCase ( self ): __UpperCAmelCase = Dataset.from_list([] ) self.assertEqual(len(__A ) , 0 ) self.assertListEqual(dset.column_names , [] )
126
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ 
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if 
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ 
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
0
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ): snake_case = ['pixel_values'] def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] = True , SCREAMING_SNAKE_CASE_ : str = 1 / 255 , SCREAMING_SNAKE_CASE_ : Union[str, Any] = True , SCREAMING_SNAKE_CASE_ : List[Any] = 8 , **SCREAMING_SNAKE_CASE_ : Tuple , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = do_rescale lowerCamelCase__ = rescale_factor lowerCamelCase__ = do_pad lowerCamelCase__ = pad_size def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] = None ): lowerCamelCase__ , lowerCamelCase__ = get_image_size(SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = (old_height // size + 1) * size - old_height lowerCamelCase__ = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict = None , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : List[Any] = None , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Any = None , 
SCREAMING_SNAKE_CASE_ : Any = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ): lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ = do_pad if do_pad is not None else self.do_pad lowerCamelCase__ = pad_size if pad_size is not None else self.pad_size lowerCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. lowerCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: lowerCamelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_pad: lowerCamelCase__ = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] lowerCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] lowerCamelCase__ = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
129
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCAmelCase_( lowerCamelCase__ ): '''simple docstring''' __lowercase : Union[str, Any] = 'openai-gpt' __lowercase : List[Any] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,__UpperCAmelCase=4_0478 ,__UpperCAmelCase=512 ,__UpperCAmelCase=768 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase="cls_index" ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=0.1 ,**__UpperCAmelCase ,) -> List[Any]: lowerCAmelCase__ : List[str] = vocab_size lowerCAmelCase__ : str = n_positions lowerCAmelCase__ : List[str] = n_embd lowerCAmelCase__ : str = n_layer lowerCAmelCase__ : Union[str, Any] = n_head lowerCAmelCase__ : Union[str, Any] = afn lowerCAmelCase__ : Optional[Any] = resid_pdrop lowerCAmelCase__ : List[Any] = embd_pdrop lowerCAmelCase__ : int = attn_pdrop lowerCAmelCase__ : str = layer_norm_epsilon lowerCAmelCase__ : int = initializer_range lowerCAmelCase__ : Dict = summary_type lowerCAmelCase__ : Dict = summary_use_proj lowerCAmelCase__ : int = summary_activation lowerCAmelCase__ : Union[str, Any] = summary_first_dropout lowerCAmelCase__ : Tuple = summary_proj_to_labels super().__init__(**__UpperCAmelCase )
565
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class _lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" __magic_name__ :Optional[int] = 'trocr' __magic_name__ :Union[str, Any] = ['past_key_values'] __magic_name__ :Dict = { 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self , __UpperCAmelCase=5_0_2_6_5 , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=4_0_9_6 , __UpperCAmelCase="gelu" , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :int = vocab_size lowerCAmelCase__ :str = d_model lowerCAmelCase__ :List[str] = decoder_layers lowerCAmelCase__ :Union[str, Any] = decoder_attention_heads lowerCAmelCase__ :List[Any] = decoder_ffn_dim lowerCAmelCase__ :Tuple = activation_function lowerCAmelCase__ :Optional[int] = max_position_embeddings lowerCAmelCase__ :int = dropout lowerCAmelCase__ :Optional[int] = attention_dropout lowerCAmelCase__ :str = activation_dropout lowerCAmelCase__ :Any = init_std lowerCAmelCase__ :int = decoder_layerdrop lowerCAmelCase__ :List[str] = use_cache lowerCAmelCase__ :Optional[int] = scale_embedding lowerCAmelCase__ :str = use_learned_position_embeddings lowerCAmelCase__ :Dict = layernorm_embedding super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , 
eos_token_id=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
93
'''simple docstring''' import functools def a_ ( __snake_case : str , __snake_case : str ) -> int: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) @functools.cache def min_distance(__snake_case : int , __snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
676
0
import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) class UpperCamelCase__ (lowerCamelCase__ ): '''simple docstring''' def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]: warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
311
'''simple docstring''' def a_ ( __snake_case : int ) -> bool: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if number < 0: return False lowerCamelCase_ =number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
676
0
'''simple docstring''' def snake_case__ ( _A: str , _A: str ) -> bool: '''simple docstring''' lowerCAmelCase = len(__snake_case ) lowerCAmelCase = len(__snake_case ) lowerCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] lowerCAmelCase = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: lowerCAmelCase = True if a[i].islower(): lowerCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
370
'''simple docstring''' from __future__ import annotations a_ : int = list[list[int]] # assigning initial values to the grid a_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution a_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( __snake_case : Matrix ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__snake_case ): lowerCamelCase_, lowerCamelCase_ =location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ): lowerCamelCase_ =digit if sudoku(__snake_case ) is not None: return grid lowerCamelCase_ =0 return None def a_ ( __snake_case : Matrix ) -> None: """simple docstring""" for row in grid: for cell in row: print(__snake_case , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") a_ : Union[str, Any] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
676
0
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __A ( _lowercase , _lowercase ): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _A = flax_key_tuple[:-1] + ('''weight''',) _A = torch.permute(__snake_case , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ): # linear layer _A = flax_key_tuple[:-1] + ('''weight''',) _A = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _A = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def __A ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' if "metadata" in layer: _A = layer.split('''metadata''' ) _A = ''''''.join(split_layer[0] )[:-1] _A = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )] elif "kvstore" in layer: _A = layer.split('''kvstore''' ) _A = ''''''.join(split_layer[0] )[:-1] _A = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )] else: _A = layer.split('''/''' ) _A = '''/'''.join(split_layer[:-1] ) _A = (split_layer[-1],) if "kvstore/path" in layer: _A = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: _A = '''file''' else: _A = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = rename_keys(__snake_case ) _A = {} for k, v in current_block.items(): _A = v _A = new_current_block torch.save(__snake_case , __snake_case ) def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase 
= WEIGHTS_NAME ): '''simple docstring''' _A = convert_file_size_to_int(__snake_case ) _A = [] _A = {} _A = 0 _A = 0 os.makedirs(__snake_case , exist_ok=__snake_case ) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp: _A = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target'''] _A = flatten_dict(__snake_case , sep='''/''' ) _A = {} for layer in checkpoint_info.keys(): _A ,_A ,_A = get_key_and_tensorstore_dict( __snake_case , __snake_case , __snake_case ) if curr_real_layer_name in all_layers: _A = content else: _A = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _A = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _A = torch.tensor(__snake_case ) _A = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _A ,_A = rename_base_flax_keys(tuple(key.split('''/''' ) ) , __snake_case ) _A = '''/'''.join(__snake_case ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _A = os.path.join( __snake_case , weights_name.replace('''.bin''' , f"""-{len(__snake_case )+1:05d}-of-???.bin""" ) ) rename_and_save_block(__snake_case , __snake_case ) sharded_state_dicts.append(current_block.keys() ) del current_block _A = {} _A = 0 _A = raw_weights.to(getattr(__snake_case , __snake_case ) ) current_block_size += weight_size total_size += weight_size # Add the last block _A = os.path.join(__snake_case , weights_name.replace('''.bin''' , f"""-{len(__snake_case )+1:05d}-of-???.bin""" ) ) rename_and_save_block(__snake_case , __snake_case ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__snake_case ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _A = {} _A = {} for idx, shard in enumerate(__snake_case ): _A = weights_name.replace( '''.bin''' , f"""-{idx+1:05d}-of-{len(__snake_case ):05d}.bin""" ) # len(sharded_state_dicts):05d} _A = os.path.join(__snake_case , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) _A = shard for key in shard: _A = shard_file # Add the metadata _A = {'''total_size''': total_size} _A = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: _A = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. 
Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) __A = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __A ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _A = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' ) config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' ) _A = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' ) _A = TaTokenizer.from_pretrained('''t5-small''' ) _A = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' _A = tokenizer(__snake_case , return_tensors='''pt''' ).input_ids _A = model.generate(__snake_case , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
484
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Tuple = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] ='informer' lowercase : Union[str, Any] ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =prediction_length lowerCamelCase_ =context_length or prediction_length lowerCamelCase_ =distribution_output lowerCamelCase_ =loss lowerCamelCase_ =input_size lowerCamelCase_ =num_time_features lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowerCamelCase_ =scaling lowerCamelCase_ =num_dynamic_real_features lowerCamelCase_ =num_static_real_features lowerCamelCase_ =num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if 
len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =cardinality else: lowerCamelCase_ =[0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =embedding_dimension else: lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase_ =num_parallel_samples # Transformer architecture configuration lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features lowerCamelCase_ =d_model lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =decoder_layers lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =use_cache # Informer lowerCamelCase_ =attention_type lowerCamelCase_ =sampling_factor lowerCamelCase_ =distil super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
676
0
"""simple docstring""" def __UpperCAmelCase ( __lowerCamelCase ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence lowercase__ : Union[str, Any] = gray_code_sequence_string(__snake_case ) # # convert them to integers for i in range(len(__snake_case ) ): lowercase__ : List[Any] = int(sequence[i] , 2 ) return sequence def __UpperCAmelCase ( __lowerCamelCase ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] lowercase__ : str = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits lowercase__ : Tuple = gray_code_sequence_string(bit_count - 1 ) lowercase__ : Any = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): lowercase__ : Tuple = '''0''' + smaller_sequence[i] sequence.append(__snake_case ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): lowercase__ : Tuple = '''1''' + smaller_sequence[i] sequence.append(__snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
560
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
0
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: cap applied to each beta (avoids singularities near t=1).
        alpha_transform_type: "cosine" or "exp" noise schedule.

    Returns:
        1-D ``torch.FloatTensor`` of betas.

    Raises:
        ValueError: for an unknown ``alpha_transform_type``.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t+1)/alpha_bar(t), clipped at max_beta
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UpperCAmelCase__(SchedulerMixin, ConfigMixin):
    """Second-order (Heun) discrete scheduler, Algorithm 2 of Karras et al. 2022.

    Each denoising step alternates between a first-order (Euler) half-step and
    a second-order correction, so sigmas/timesteps are duplicated internally.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2  # the model is evaluated twice per output step

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Map a timestep value to its index in the (duplicated) schedule."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        """Standard deviation of the initial noise distribution."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        """Scale the input sample by 1/sqrt(sigma^2 + 1) for the current timestep."""
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        """Precompute the timestep/sigma schedule used during denoising."""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # Heun needs each interior sigma twice (predictor + corrector half-steps).
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative (puts the scheduler in "first order mode")
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        """Invert the sigma schedule: interpolate a (fractional) timestep for ``sigma``."""
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        """Construct the noise schedule of Karras et al. (2022), eq. (5)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        # No stored dt means the next `step` call runs the Euler (first-order) half.
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict=True):
        """Advance the sample one (half-)step; alternates Euler and Heun correction.

        Returns a ``SchedulerOutput`` (or a one-tuple if ``return_dict`` is False).
        """
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method: average current and stored derivatives
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Diffuse ``original_samples`` to the noise levels of ``timesteps``."""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
221
"""Unconditional image-generation pipeline using DDIM sampling."""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __UpperCamelCase(DiffusionPipeline):
    """Pipeline that denoises random noise into images with a UNet + DDIM scheduler.

    Args:
        unet: the denoising UNet model.
        scheduler: any scheduler whose config can be converted to DDIM.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate ``batch_size`` images; returns ImagePipelineOutput (or a tuple)."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
0
"""PyTorch Flaubert model tests: shape-checking tester, common-test harness, integration check."""
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds tiny Flaubert configs/inputs and checks each task head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels matching the tester's dimensions."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
284
"""Twin-prime check: a number n such that both n and n + 2 are prime."""
from maths.prime_check import is_prime


def a_(number: int) -> int:
    """Return ``number + 2`` if (number, number + 2) is a twin prime pair, else -1.

    Args:
        number: candidate lower member of the twin pair.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    # Bug fix: the original checked isinstance(number, number), i.e. the value
    # against itself, which raises TypeError for every input. Check against int.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
'''simple docstring''' def __UpperCamelCase( _A : list ): '''simple docstring''' if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] UpperCAmelCase__ : List[str] = grid[0] for row_n in range(1 , len(__snake_case ) ): UpperCAmelCase__ : Optional[int] = grid[row_n] UpperCAmelCase__ : Tuple = fill_row(__snake_case , __snake_case ) UpperCAmelCase__ : Union[str, Any] = grid[row_n] return grid[-1][-1] def __UpperCamelCase( _A : list , _A : list ): '''simple docstring''' current_row[0] += row_above[0] for cell_n in range(1 , len(__snake_case ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
614
"""Variance-exploding (SDE-VE) scheduler from score-based generative modeling."""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the predictor step.

    Attributes:
        prev_sample: sample at the previous timestep (drift + fresh noise).
        prev_sample_mean: the drift-only part of ``prev_sample`` (no noise).
    """

    # BUG FIX: this class had the same (obfuscated) name as the scheduler
    # below, so ``step_pred``'s ``return SdeVeOutput(...)`` raised NameError.
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class __UpperCamelCase(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler.

    Implements the predictor/corrector sampler of Song et al. (2021),
    "Score-Based Generative Modeling through Stochastic Differential
    Equations" (https://arxiv.org/abs/2011.13456).
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """The SDE-VE scheduler needs no input scaling; return the sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Set the continuous timesteps used for the diffusion chain.

        BUG FIX: the previous revision computed the linspace but never
        assigned ``self.timesteps``, so every later step raised.
        """
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the noise scales used for the diffusion chain.

        ``discrete_sigmas`` is a geometric progression between ``sigma_min``
        and ``sigma_max``; ``sigmas`` maps each continuous timestep to its
        noise scale.
        """
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(
            torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)
        )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete timestep (zero at the chain's start)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the reverse SDE.

        Returns an ``SdeVeOutput`` (or a ``(prev_sample, prev_sample_mean)``
        tuple when ``return_dict=False``).
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: Langevin-style correction after the predictor step."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Scale noise by the per-timestep sigma and add it to the samples."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
676
0
"""T5-style decoder with FiLM conditioning (spectrogram-diffusion decoder)."""
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class UpperCAmelCase(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5 decoder: continuous inputs + noise time -> spectrogram.

    BUG FIX: the previous revision assigned every submodule to a throwaway
    local instead of ``self.<attr>`` and gave all helper classes the same
    name, so construction and forward both failed.
    """

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        # MLP turning the timestep embedding into the FiLM conditioning signal.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
        """Build a broadcastable cross-attention mask from two 1/0 masks."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            inputs = lyr(
                inputs,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        inputs = self.decoder_norm(inputs)
        inputs = self.post_dropout(inputs)

        spec_out = self.spec_out(inputs)
        return spec_out


class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder layer: self-attn, cross-attn, FF."""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            # Convert the 1/0 mask into an additive bias (-1e10 for masked keys).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class TaLayerSelfAttentionCond(nn.Module):
    """Self-attention block whose pre-norm output is FiLM-modulated."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attn_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attn_output)

        return hidden_states


class TaLayerCrossAttention(nn.Module):
    """Cross-attention block attending to the encoder hidden states."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attn_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attn_output)
        return layer_output


class TaLayerFFCond(nn.Module):
    """FiLM-conditioned gated feed-forward block."""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class TaDenseGatedActDense(nn.Module):
    """T5 gated-GELU feed-forward: GELU(wi_0(x)) * wi_1(x) -> dropout -> wo."""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # BUG FIX: both gate projections previously shared one name (wi_a),
        # so the linear gate reused the GELU branch's weights.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm (no mean subtraction, no bias)."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Always compute the variance in float32 for stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """Gaussian Error Linear Unit (tanh approximation, as used by GPT/T5)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    """Feature-wise Linear Modulation: x * (1 + scale) + shift."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
126
"""FizzBuzz: build the classic Fizz/Buzz sequence for a range of numbers."""


def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Args:
        number: starting value (must be an int >= 1).
        iterations: inclusive upper bound of the sequence.

    Returns:
        A space-terminated string, e.g. ``a_(1, 7) -> "1 2 Fizz 4 Buzz Fizz 7 "``.

    Raises:
        ValueError: if either argument is invalid.
    """
    # BUG FIX: the previous revision declared both parameters under the same
    # name (a SyntaxError) and referenced undefined names in the body.
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be and integer and be more than 0"
        )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Integration test for `datasets-cli test`: run it against a dataset loading
script and verify the dataset_infos metadata written into README.md.
"""
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict

# Lightweight stand-in for the argparse namespace the real CLI would build.
# BUG FIX: this was previously bound to a different name than the one the
# test below constructs, which raised NameError.
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_apercent_close(source, target):
    """True when ``source`` is within 1% (relative) of ``target``."""
    # BUG FIX: previously declared with two identical parameter names
    # (a SyntaxError) and under a name the test below never referenced.
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def _A(dataset_loading_script_dir):
    """End-to-end check of ``datasets-cli test --save_infos --all_configs``.

    ``dataset_loading_script_dir`` is the pytest fixture (from datasets'
    conftest) pointing at a toy dataset loading script.
    """
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # Byte counts may drift slightly between versions; allow 1%.
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # BUG FIX: this comparison was previously a bare expression and
            # never actually asserted anything.
            assert result == expected
129
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
0
"""Regression tests for DPMSolverSDEScheduler against known full-loop outputs."""
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class lowerCAmelCase_(SchedulerCommonTest):
    """DPMSolverSDEScheduler test suite.

    BUG FIX: the previous revision subclassed an undefined name, gave every
    test method the same name (so all but one were shadowed), and assigned
    locals under names the following statements never read.
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend; tolerances match upstream.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
565
"""Preprocess a text dump for distillation: tokenize once, store token ids
as a pickled list of numpy arrays (tokenization + token_to_ids).
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def a_():
    """CLI entry point: read a text file, encode every line, pickle the ids.

    BUG FIX: the previous revision assigned ``parser``/``args``/``tokenizer``
    results to throwaway names (every following statement raised NameError),
    imported a nonexistent ``GPTaTokenizer`` and called nonexistent
    ``np.uintaa``/``np.intaa`` dtypes.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    n_processed = 0  # renamed from `iter` to avoid shadowing the builtin
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        n_processed += 1
        if n_processed % interval == 0:
            end = time.time()
            logger.info(f"{n_processed} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when every token id fits in 16 bits; int32 otherwise.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    a_()
676
0
"""Projects CLIP image/text embeddings into UNet conditioning (unCLIP)."""
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class _lowerCAmelCase(ModelMixin, ConfigMixin):
    """Turn CLIP embeddings into time-embedding offsets and extra context tokens.

    BUG FIX: the previous revision assigned every submodule to a throwaway
    local instead of ``self.<attr>``, so the forward pass raised
    AttributeError on every call; the forward method also carried an
    obfuscated name instead of ``forward``.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        # Learned embedding substituted for the image embedding when doing
        # classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return ``(text_encoder_hidden_states, additive_clip_time_embeddings)``."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
93
"""MVP model configuration (BART-style encoder-decoder with prompt tuning)."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for MVP models.

    BUG FIX: the previous revision subclassed an undefined name, declared
    every ``__init__`` parameter under the same identifier (a SyntaxError),
    and assigned configuration values to a throwaway local instead of
    ``self.<attr>``.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy escape hatch: honor the old `force_bos_token_to_be_generated`
        # kwarg by mapping it onto `forced_bos_token_id`.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
0
"""Doc-sample test suite (restored from name-mangled source).

Defect fixed beyond de-mangling: the `only_modules=False` branch evaluated
`str(".." / directory / file)` — `str / Path` raises TypeError; it must be
`Path("..") / directory / file`.
"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests on every file of `directory`.

        Args:
            directory: folder to scan for files.
            identifier: keep only files whose name contains this substring.
            ignore_files: file names to skip (``__init__.py`` is always skipped).
            n_identifier: substring(s) — files containing any of them are dropped.
            only_modules: if True, run ``DocTestSuite`` on the matching
                ``transformers`` attribute; otherwise run ``doctest.testfile``.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                # FIX: build the path with pathlib; "" / Path is a TypeError.
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
311
"""SentencePiece tokenizer for BertGeneration (restored from mangled source).

The mangle destroyed every method name and assignment target; names below are
reconstructed from the surviving call sites (`self.sp_model`, `self.vocab_file`,
`self.sp_model_kwargs`) and the standard slow-tokenizer interface.
"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """Tokenizer backed by a SentencePiece model file (`spiece.model`)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # ids of tokens prepended by `build_inputs_with_special_tokens` (none here)
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is a C++ object and cannot be pickled;
        # drop it and reload from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into text, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
676
0
"""ONNX Runtime TensorRT latency benchmark (restored from mangled source).

Every assignment target was mangled to `__lowercase` while later lines still
referenced `sess_opt`, `sess`, `run_opt`, `input_ids`, etc.; `np.intaa` is the
mangled spelling of `np.int64`. The three leading "1"/"0"/"1" strings are the
standard ORT TensorRT INT8 environment switches.
"""
import os
import time

import numpy as np
import onnxruntime as ort

# Enable INT8 TensorRT execution and engine caching.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
# Disable graph optimizations so TensorRT sees the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

# One warm-up run so engine build/copy costs are excluded from timing.
print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
370
"""Polynomial evaluation: naive power sum vs. Horner's method.

Restored from a mangled source in which BOTH functions were named `a_` and
both parameters shared the single name `__snake_case` (invalid Python), while
the `__main__` section still called `evaluate_poly` / `horner`.
"""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(poly[i] * x**i) directly.

    O(n) multiplications of `x**i` per term; kept for comparison with `horner`.
    Returns 0 for an empty coefficient sequence.
    """
    return sum(coeff * (x**power) for power, coeff in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme.

    One multiply and one add per coefficient — no explicit exponentiation.
    Returns 0.0 for an empty coefficient sequence.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
0
"""AltCLIP configuration classes (restored from name-mangled source).

Class and attribute names are reconstructed from the surviving string literals
(`"altclip_text_model"`, `"altclip_vision_model"`, `"altclip"`, the warning
messages naming `AltCLIPTextConfig`/`AltCLIPVisionConfig`) and from the
standard CLIP-style composite-config layout.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text tower (XLM-R style encoder)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection from the text tower into CLIP space.
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision tower (ViT-style encoder)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding a text and a vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # The legacy `*_config_dict` kwargs must be popped BEFORE the super
        # call so they are not stored as unknown kwargs.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in
        # most cases, but we don't want to break anything regarding `_config_dict` that existed before.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Alternate constructor from two already-built sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize including nested sub-config dicts and the model type."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""AltCLIP processor (restored from name-mangled source).

Wraps a CLIP image processor and an XLM-R tokenizer into one callable. The
mangle dropped the `encoding["pixel_values"] = ...` assignment target in
`__call__`; it is reconstructed from the standard CLIP processor contract.
"""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # `feature_extractor` is the deprecated v4 name for `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images; with both, pixel values are merged into
        the tokenizer encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicated union of both sub-processors' input names, order kept.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
0
"""Text-to-text generation pipelines (restored from name-mangled source).

Class and method names are reconstructed from the surviving string literals
(`"generated"`, `"summary"`, `"translation"`, the warning texts) and the
standard `Pipeline` preprocess/_forward/postprocess contract.
"""
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation with seq2seq models."""

    # Used in the return key of the pipeline (`generated_text` / `generated_token_ids`).
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # Unwrap single-result lists when the caller passed a batch of strings.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Reshape to (batch, num_return_sequences, seq_len).
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """Summarization pipeline; warns on suspicious min/max length settings."""

    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """Translation pipeline; supports `src_lang`/`tgt_lang` or a task string."""

    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
560
"""Image-to-text pipeline (restored from name-mangled source).

Names reconstructed from surviving call sites (`load_image`, `self.image_processor`,
`self.tokenizer`, model-type branches for `git`/`pix2struct`) and the standard
`Pipeline` preprocess/_forward/postprocess contract.
"""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Predicts a caption (optionally prompt-conditioned) for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Batched Git w/o prompt produces a list of `None` input_ids; normalize.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
0
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of shards described by the list-valued entries of ``gen_kwargs``.

    Every list in ``gen_kwargs`` is interpreted as a list of data sources (one per shard),
    so all lists must have the same length; non-list values are ignored.

    Raises:
        RuntimeError: if two list values have different lengths (sharding is ambiguous).
    """
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    # There is always at least one shard, even when no list is present at all.
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous groups.

    The first ``num_shards % max_num_jobs`` groups receive one extra shard so group sizes
    are as balanced as possible; empty groups are not returned.
    """
    shards_indices_per_group: List[range] = []
    for group_idx in range(max_num_jobs):
        # Integer division plus one extra shard for the first `num_shards % max_num_jobs` groups.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split ``gen_kwargs`` into at most ``max_num_jobs`` dicts, sharding every list value.

    Non-list values are copied into every resulting dict unchanged.
    """
    shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if shards == 1:
        return [dict(gen_kwargs)]
    shard_indices_per_group = _distribute_shards(num_shards=shards, max_num_jobs=max_num_jobs)
    return [
        {
            key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value, list)
            else value
            for key, value in gen_kwargs.items()
        }
        for group_idx in range(len(shard_indices_per_group))
    ]


def merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of sharded ``gen_kwargs`` back into one dict (inverse of ``_split_gen_kwargs``).

    List values are concatenated in order; non-list values are taken from the first dict.
    """
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of ``gen_kwargs`` where every list value has been shuffled by ``rng``.

    Lists of the same length share one permutation so that parallel data-source lists
    (e.g. files and their labels) stay aligned after shuffling.
    """
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
221
"""Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` checkpoint."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a PyTorch BERT model from ``bert_config_file``, load the TF weights, and save it.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config file describing the pre-trained model architecture.
        pytorch_dump_path: Where to write the resulting PyTorch ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
0
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """Unconditional image-generation pipeline using DDIM sampling.

    Parameters:
        unet: Denoising U-Net used to predict the noise residual at each step.
        scheduler: Scheduler to step the denoising loop; it is converted to a
            ``DDIMScheduler`` from its config on construction.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the DDIM denoising loop and return the generated image(s).

        Args:
            batch_size: Number of images to generate.
            generator: Optional ``torch.Generator`` (or one per batch item) for determinism.
            eta: DDIM eta (η) in [0, 1]; 0 is fully deterministic DDIM.
            num_inference_steps: Number of scheduler steps.
            use_clipped_model_output: Forwarded to ``scheduler.step``.
            output_type: ``"pil"`` for PIL images, anything else for a numpy array.
            return_dict: If False, return a plain ``(images,)`` tuple.
        """
        # sample_size may be a single int (square images) or an explicit (h, w) pair
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # Start from pure gaussian noise
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
284
"""AltCLIP model configuration classes."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R style transformer)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection head on top of the text encoder.
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (ViT style transformer)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision config, unwrapping it when the checkpoint is a full AltCLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding an ``AltCLIPTextConfig`` and an ``AltCLIPVisionConfig``."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # `text_config_dict`/`vision_config_dict` are legacy kwargs kept for backward
        # compatibility: their values are merged into `text_config`/`vision_config` below.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()

            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build an ``AltCLIPConfig`` from separate text and vision config objects."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this config (and its sub-configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
676
0
"""Tiny OpenWeatherMap client: current weather, forecast, and One Call lookups."""
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return the current weather for location ``q`` as parsed JSON.

    NOTE: ``locals()`` forwards the function arguments (q, appid) as query parameters,
    so the parameter names must match the API's expected keys.
    """
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the weather forecast for location ``q`` as parsed JSON."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return the One Call weather data for the given coordinates as parsed JSON."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
614
"""Tests for the PyTorch Flaubert model and its task heads."""
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds small Flaubert configs/inputs and checks each task head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input tensors (and labels) plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # also default to None so the tuple below is always well-defined
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        # With labels the model returns only the total loss.
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each input along a new "choices" axis.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
676
0
"""Frozen parameter-name sets used to describe diffusion pipeline call signatures.

NOTE(review): the original file annotated every assignment with typing names
(`Dict`, `Tuple`, `List[str]`, ...) that were never imported; module-level variable
annotations are evaluated at import time, so that raised NameError. The annotations
carried no information and are dropped here. The `_A` name is rebound for each set,
exactly as before, so the module's final `_A` binding is unchanged.
"""

# presumably TEXT_TO_IMAGE_PARAMS — verify against the consuming pipeline tests
_A = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
# presumably TEXT_TO_IMAGE_BATCH_PARAMS
_A = frozenset(["prompt", "negative_prompt"])
# presumably TEXT_TO_IMAGE_IMAGE_PARAMS (no image inputs)
_A = frozenset([])
# presumably IMAGE_TO_IMAGE_IMAGE_PARAMS
_A = frozenset(["image"])
# presumably IMAGE_VARIATION_PARAMS
_A = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
# presumably IMAGE_VARIATION_BATCH_PARAMS
_A = frozenset(["image"])
# presumably TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_A = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
# presumably TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A = frozenset(["prompt", "image", "negative_prompt"])
# presumably TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_A = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
# presumably TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_A = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
# presumably IMAGE_INPAINTING_PARAMS
_A = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
# presumably IMAGE_INPAINTING_BATCH_PARAMS
_A = frozenset(["image", "mask_image"])
# presumably IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS
_A = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
# presumably IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_A = frozenset(["example_image", "image", "mask_image"])
# presumably CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_A = frozenset(["class_labels"])
# presumably CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_A = frozenset(["class_labels"])
# presumably UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A = frozenset(["batch_size"])
# presumably UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_A = frozenset([])
# presumably UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_A = frozenset(["batch_size"])
# presumably UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_A = frozenset([])
# presumably TEXT_TO_AUDIO_PARAMS
_A = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
# presumably TEXT_TO_AUDIO_BATCH_PARAMS
_A = frozenset(["prompt", "negative_prompt"])
# presumably TOKENS_TO_AUDIO_GENERATION_PARAMS
_A = frozenset(["input_tokens"])
# presumably TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS
_A = frozenset(["input_tokens"])
126
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ 
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if 
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ 
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
0
"""simple docstring""" import json import sys def _A ( __lowercase , __lowercase ): """simple docstring""" with open(__snake_case , encoding="""utf-8""" ) as f: lowerCamelCase__ = json.load(__snake_case ) lowerCamelCase__ = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """] for benchmark_name in sorted(__snake_case ): lowerCamelCase__ = results[benchmark_name] lowerCamelCase__ = benchmark_name.split("""/""" )[-1] output_md.append(f"""### Benchmark: {benchmark_file_name}""" ) lowerCamelCase__ = """| metric |""" lowerCamelCase__ = """|--------|""" lowerCamelCase__ = """| new / old (diff) |""" for metric_name in sorted(__snake_case ): lowerCamelCase__ = benchmark_res[metric_name] lowerCamelCase__ = metric_vals["""new"""] lowerCamelCase__ = metric_vals.get("""old""" , __snake_case ) lowerCamelCase__ = metric_vals.get("""diff""" , __snake_case ) lowerCamelCase__ = f""" {new_val:f}""" if isinstance(__snake_case , (int, float) ) else """None""" if old_val is not None: val_str += f""" / {old_val:f}""" if isinstance(__snake_case , (int, float) ) else "None" if dif_val is not None: val_str += f""" ({dif_val:f})""" if isinstance(__snake_case , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("""</details>""" ) with open(__snake_case , """w""" , encoding="""utf-8""" ) as f: f.writelines("""\n""".join(__snake_case ) ) if __name__ == "__main__": __magic_name__ = sys.argv[1] __magic_name__ = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
129
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
0
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging _lowerCAmelCase = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise if not is_sharded: lowerCAmelCase__ : Optional[Any] = os.path.abspath(__snake_case ) logger.info(f"""Loading PyTorch weights from {pt_path}""" ) lowerCAmelCase__ : Dict = torch.load(__snake_case , map_location="""cpu""" ) logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" ) lowerCAmelCase__ : Any = convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCAmelCase__ : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): """simple docstring""" def is_key_or_prefix_key_in_dict(UpperCamelCase ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCAmelCase__ : Tuple = pt_tuple_key[:-1] + ("""scale""",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCAmelCase__ : Tuple = pt_tuple_key[:-1] + ("""mean""",) if pt_tuple_key[-1] == 
"running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ("""var""",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCAmelCase__ : Tuple = pt_tuple_key[:-1] + ("""embedding""",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCAmelCase__ : int = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase__ : Optional[Any] = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCAmelCase__ : Optional[int] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase__ : Dict = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCAmelCase__ : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[-2] + """_g""" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCAmelCase__ : Tuple = pt_tuple_key[-2] + """_v""" if name is not None: lowerCAmelCase__ : Optional[Any] = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" 
lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCAmelCase__ : Optional[Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCAmelCase__ : Dict = flax_model.params["""params"""] else: lowerCAmelCase__ : Optional[int] = flax_model.params lowerCAmelCase__ : int = flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCAmelCase__ : List[Any] = flatten_dict(flax_model.params["""batch_stats"""] ) random_flax_state_dict.update(__snake_case ) lowerCAmelCase__ : Dict = {} lowerCAmelCase__ : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) lowerCAmelCase__ : Dict = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase__ : int = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary lowerCAmelCase__ : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCAmelCase__ : List[Any] = pt_tuple_key[1:] # Correctly rename weight parameters lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCAmelCase__ : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCAmelCase__ : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"""PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape """ f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCAmelCase__ : List[Any] = jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCAmelCase__ : int = jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCAmelCase__ : Tuple = jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" import torch # Load the index lowerCAmelCase__ : Optional[int] = {} for shard_file in shard_filenames: # load using msgpack utils lowerCAmelCase__ : int = torch.load(__snake_case ) lowerCAmelCase__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCAmelCase__ : Dict = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCAmelCase__ : Tuple = flax_model.params["""params"""] lowerCAmelCase__ : str = flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) ) else: lowerCAmelCase__ : Optional[Any] = flax_model.params lowerCAmelCase__ : str = flatten_dict(__snake_case ) lowerCAmelCase__ : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) lowerCAmelCase__ : Union[str, Any] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): 
lowerCAmelCase__ : List[Any] = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary lowerCAmelCase__ : int = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCAmelCase__ : Optional[int] = pt_tuple_key[1:] # Correctly rename weight parameters lowerCAmelCase__ , lowerCAmelCase__ : Any = rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCAmelCase__ : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCAmelCase__ : List[Any] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCAmelCase__ : Tuple = jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCAmelCase__ : Dict = jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCAmelCase__ : int = jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCAmelCase__ : int = jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = os.path.abspath(__snake_case ) logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" ) # import correct flax class lowerCAmelCase__ : Optional[int] = getattr(__snake_case , """Flax""" + 
model.__class__.__name__ ) # load flax weight dict with open(__snake_case , """rb""" ) as state_f: try: lowerCAmelCase__ : Optional[int] = from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCAmelCase__ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCAmelCase__ : Tuple = jax.tree_util.tree_map( lambda UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCAmelCase__ : Dict = flatten_dict(__snake_case ) lowerCAmelCase__ : int = pt_model.state_dict() lowerCAmelCase__ : Optional[Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) lowerCAmelCase__ : Optional[Any] = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCAmelCase__ : str = [] lowerCAmelCase__ : int = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase__ : Tuple = flax_key_tuple[0] == pt_model.base_model_prefix lowerCAmelCase__ : Dict = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCAmelCase__ : Optional[Any] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCAmelCase__ : Optional[int] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCAmelCase__ : Tuple = flax_key_tuple[:-1] + ("""weight""",) lowerCAmelCase__ : int = jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCAmelCase__ : List[str] = flax_key_tuple[:-1] + ("""weight""",) lowerCAmelCase__ : int = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCAmelCase__ : List[str] = 
flax_key_tuple[:-1] + ("""weight""",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCAmelCase__ : int = flax_key_tuple[:-1] + ("""running_mean""",) elif "var" in flax_key_tuple[-1]: lowerCAmelCase__ : int = flax_key_tuple[:-1] + ("""running_var""",) if "batch_stats" in flax_state: lowerCAmelCase__ : List[Any] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCAmelCase__ : int = """.""".join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. lowerCAmelCase__ : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCAmelCase__ : List[Any] = key.split(""".""" ) lowerCAmelCase__ : Union[str, Any] = None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCAmelCase__ : List[Any] = key_components[-2] + """_g""" elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCAmelCase__ : int = key_components[-2] + """_v""" if name is not None: lowerCAmelCase__ : List[str] = key_components[:-3] + [name] lowerCAmelCase__ : List[Any] = """.""".join(__snake_case ) lowerCAmelCase__ : Optional[Any] = key if flax_key in special_pt_names: lowerCAmelCase__ : List[str] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict lowerCAmelCase__ : Dict = np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCAmelCase__ : Optional[Any] = torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCAmelCase__ : List[Any] = list(__snake_case ) if len(__snake_case ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" """ to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) else: logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" ) if len(__snake_case ) > 0: logger.warning( f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" """ use it for predictions and inference.""" ) else: logger.warning( f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n""" """If your task is similar to the task the model of the checkpoint was trained on, """ f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" ) return pt_model
565
"""Lazy-loading package stub for the TimmBackbone model.

Importing from this module is cheap: heavy submodules (and therefore torch)
are only imported when one of the exported names is actually accessed.
"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule below.
# (The original bound this dict to a throwaway name, leaving the
# `_import_structure` referenced at the bottom undefined.)
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch only the configuration is importable.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Replace this module object with a lazy proxy so submodules load on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __A = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
93
"""Top-down memoized edit (Levenshtein) distance between two strings."""
import functools


def a_(worda: str, wordb: str) -> int:
    """Return the minimum number of single-character insertions, deletions
    and substitutions needed to turn ``worda`` into ``wordb``.

    The original signature declared the same name for both parameters (a
    SyntaxError) and compared a character with itself, so the distance
    was always wrong; this restores the intended two-word recursion.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: insert the rest of the second word.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete the rest of the first word.
        if indexb >= len_wordb:
            return len_worda - indexa
        # 0 if the current characters match, 1 if a substitution is needed.
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from worda
            1 + min_distance(indexa, indexb + 1),  # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # substitute/keep
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Convert a timm DeiT checkpoint to the Hugging Face Transformers format.

All five functions in the original were defined under the single name ``A``
while being called as ``create_rename_keys``/``rename_key``/``read_in_q_k_v``/
``prepare_img``/``convert_deit_checkpoint`` (and ``parser``/``args`` were
likewise clobbered), so the script could not run; this restores the names
the call sites require.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping timm parameter names to HF ones."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value entries.

    Mutates ``state_dict`` in place.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy the weights of ``deit_name`` (a timm model id) into an HF model,
    verify the logits agree, and save model + image processor to
    ``pytorch_dump_folder_path``.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # the timm model id encodes patch size and image size in its suffix,
    # e.g. "vit_deit_base_distilled_patch16_224"
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # DeiTConfig defaults are the "base" architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    # to maintain same ratio w.r.t. 224 images, see
    # https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    size = int((256 / 224) * config.image_size)
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
311
"""Check whether an integer is an automorphic number."""


def a_(number: int) -> bool:
    """Return ``True`` if ``number``'s square ends in ``number`` itself
    (an automorphic number, e.g. 5 -> 25, 76 -> 5776).

    Raises:
        TypeError: if ``number`` is not an int.

    The original passed the argument as both operands of ``isinstance`` and
    referenced an undefined name ``number``; this restores the intended
    digit-by-digit comparison.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of number and its square, one at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Word-break: can a string be segmented into dictionary words? (trie + DP)"""
import functools
from typing import Any


def snake_case__(string: str, words: list[str]) -> bool:
    """Return ``True`` if ``string`` can be split into a sequence of words
    from ``words`` (words may be reused).

    Raises:
        ValueError: if ``string`` is empty/not a str, or ``words`` is not a
            list of non-empty strings.

    The original declared both parameters under the same name (a
    SyntaxError) and referenced undefined names throughout the body; this
    restores the intended trie + memoized-DP implementation.
    """
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("""the string should be not empty string""")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("""the words should be a list of non-empty strings""")

    # Build trie: nested dicts, with WORD_KEEPER marking the end of a word.
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        """Can string[index:] be segmented into dictionary words?"""
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                # No dictionary word starts with string[index : i + 1].
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
370
'''simple docstring''' from __future__ import annotations a_ : int = list[list[int]] # assigning initial values to the grid a_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution a_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( __snake_case : Matrix ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__snake_case ): lowerCamelCase_, lowerCamelCase_ =location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ): lowerCamelCase_ =digit if sudoku(__snake_case ) is not None: return grid lowerCamelCase_ =0 return None def a_ ( __snake_case : Matrix ) -> None: """simple docstring""" for row in grid: for cell in row: print(__snake_case , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") a_ : Union[str, Any] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
676
0
"""Single-weight, single-neuron network trained by gradient descent."""
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid; with ``deriv=True``, its derivative given the
    sigmoid OUTPUT ``value`` (i.e. value * (1 - value)).
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value (fixed input to the neuron and the learning rate)
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train the weight so the neuron's output approaches ``expected`` / 100,
    then return the final output scaled back to [0, 100].

    ``number_propagations`` must be >= 1 (the loop defines the returned value).
    The original defined both functions and the constant under the single
    name ``__A`` while calling ``sigmoid_function``/``INITIAL_VALUE``.
    """
    # Random starting weight in {1, 3, ..., 199}.
    weight = float(2 * (random.randint(1, 100)) - 1)

    layer_a = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_a * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
484
"""Informer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for an Informer time-series model.

    The original assigned every attribute to a single throwaway local
    instead of ``self.<name>`` (so ``_number_of_features`` read attributes
    that were never set) and inherited from an undefined name; this
    restores the ``self.`` assignments and the ``PretrainedConfig`` base.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_real_features=0,
        num_static_categorical_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`"""
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`"""
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra per-step input features fed to the transformer."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class __A ( lowerCamelCase__ ): '''simple docstring''' lowerCAmelCase : int = 'van' def __init__( self : Any ,_snake_case : List[Any]=224 ,_snake_case : List[Any]=3 ,_snake_case : Tuple=[7, 3, 3, 3] ,_snake_case : Union[str, Any]=[4, 2, 2, 2] ,_snake_case : List[Any]=[64, 128, 320, 512] ,_snake_case : Dict=[3, 3, 12, 3] ,_snake_case : Any=[8, 8, 4, 4] ,_snake_case : int="gelu" ,_snake_case : Optional[int]=0.02 ,_snake_case : Optional[int]=1e-6 ,_snake_case : Any=1e-2 ,_snake_case : List[str]=0.0 ,_snake_case : Dict=0.0 ,**_snake_case : Union[str, Any] ,) -> Optional[int]: """simple docstring""" super().__init__(**_snake_case ) lowercase__ : Union[str, Any] = image_size lowercase__ : Union[str, Any] = num_channels lowercase__ : str = patch_sizes lowercase__ : List[Any] = strides lowercase__ : Union[str, Any] = hidden_sizes lowercase__ : List[str] = depths lowercase__ : List[str] = mlp_ratios lowercase__ : Union[str, Any] = hidden_act lowercase__ : Optional[int] = initializer_range lowercase__ : str = layer_norm_eps lowercase__ : Dict = layer_scale_init_value lowercase__ : List[str] = drop_path_rate lowercase__ : Any = dropout_rate
560
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
0
"""PyTorch RegNet model (backbone + image-classification head)."""

from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, with 'same'-style padding."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # bias=False because BatchNorm immediately follows the convolution.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single stride-2 convolution over the pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """
    RegNet shortcut: 1x1 convolution + batch-norm used to project the residual
    to the correct size/stride when the main branch changes shape.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation channel attention (https://arxiv.org/abs/1709.01507)."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: Tensor) -> Tensor:
        # Global average pool -> bottleneck MLP -> per-channel gate.
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet X layer: 1x1 -> grouped 3x3 -> 1x1 bottleneck with residual."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # No activation on the last conv: it is applied after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage: ``depth`` stacked X or Y layers, downsampling first."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    """Stacks the RegNet stages and optionally collects per-stage hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first
        # stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Standard ResNet-style init: He for convs, (1, 0) for norms.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from labels/config, then pick the loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
221
"""Unconditional image-generation pipeline using DDIM sampling."""

from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    r"""
    Pipeline for image generation with DDIM.

    Parameters:
        unet: a `UNet2DModel` that denoises the encoded image latents.
        scheduler: a scheduler used together with `unet` to denoise; it is
            converted to a [`DDIMScheduler`] on construction.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Generate `batch_size` images by iteratively denoising Gaussian noise.

        Args:
            batch_size: number of images to generate.
            generator: RNG(s) for deterministic sampling; if a list, its
                length must equal `batch_size`.
            eta: corresponds to η in the DDIM paper; 0 is fully deterministic.
            num_inference_steps: number of denoising steps.
            use_clipped_model_output: forwarded to `DDIMScheduler.step`.
            output_type: `"pil"` for PIL images, anything else for numpy.
            return_dict: whether to return an [`ImagePipelineOutput`].

        Returns:
            [`ImagePipelineOutput`] if `return_dict` is `True`, otherwise a
            tuple whose first element is the generated images.
        """
        # Sample gaussian noise to begin the loop; sample_size may be an int
        # (square images) or an explicit (height, width) pair.
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
0
"""Fine-tune a token-classification model (NER/POS/chunking) on CoNLL-formatted data."""

import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Parse arguments, build datasets/model, then train / evaluate / predict."""
    # See all possible arguments in src/transformers/training_args.py
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # The concrete task (NER, POS, ...) is defined in a local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Convert logits to label strings, dropping positions that carry the
        # CrossEntropyLoss ignore_index (padding/special tokens).
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs): entry point must accept the process index.
    main()


if __name__ == "__main__":
    main()
284
"""Twin-prime check: returns the partner of a twin-prime pair, or -1."""

try:
    from maths.prime_check import is_prime
except ImportError:  # pragma: no cover - fallback so the module runs standalone

    def is_prime(number: int) -> bool:
        """Trial-division primality test used when the project helper is unavailable."""
        if number < 2:
            return False
        if number % 2 == 0:
            return number == 2
        divisor = 3
        while divisor * divisor <= number:
            if number % divisor == 0:
                return False
            divisor += 2
        return True


def twin_prime(number: int) -> int:
    """
    Return ``number + 2`` if ``(number, number + 2)`` is a twin-prime pair,
    otherwise ``-1``.

    Raises:
        TypeError: if ``number`` is not an integer.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        # Bug fix: the message previously referenced an undefined name,
        # raising NameError instead of the intended TypeError.
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""RWKV model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """
    Configuration class for an RWKV model.

    Args:
        vocab_size (`int`, defaults to 50277): Vocabulary size.
        context_length (`int`, defaults to 1024): Maximum training context
            (exposed to callers as `max_position_embeddings` via `attribute_map`;
            inference can use any length thanks to RWKV's recurrence).
        hidden_size (`int`, defaults to 4096): Embedding/hidden dimensionality.
        num_hidden_layers (`int`, defaults to 32): Number of blocks.
        attention_hidden_size (`int`, *optional*): Attention hidden size;
            defaults to `hidden_size` when unset.
        intermediate_size (`int`, *optional*): Feed-forward inner size;
            defaults to `4 * hidden_size` when unset.
        layer_norm_epsilon (`float`, defaults to 1e-5): Layer-norm epsilon.
        bos_token_id (`int`, defaults to 0): Beginning-of-sentence token id.
        eos_token_id (`int`, defaults to 0): End-of-sentence token id.
        rescale_every (`int`, defaults to 6): At inference, divide hidden
            states by 2 every `rescale_every` layers (0/negative disables).
        tie_word_embeddings (`bool`, defaults to `False`): Tie input/output
            embeddings.
        use_cache (`bool`, defaults to `True`): Return the last state for
            incremental decoding.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Unset sizes fall back to values derived from hidden_size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
614
"""Variance-exploding (VE) SDE scheduler (predictor + corrector steps).

Fix notes: the garbling had renamed every `self.` attribute assignment and
every method to placeholder names, so the class could not even be
constructed. Assignment targets and method names are restored from their
read-sites; `SdeVeOutput`, referenced by `step_pred`, is re-bound via an
alias because the scheduler class reuses the garbled class identifier.
"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __UpperCamelCase(BaseOutput):
    """Output of a predictor step.

    Attributes:
        prev_sample: the sample for the previous timestep.
        prev_sample_mean: `prev_sample` before the diffusion noise was added.
    """

    # NOTE(review): the two fields were garbled to one duplicated name
    # (`lowercase`); restored to the keywords `step_pred` passes below.
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


# `step_pred` refers to the output class by this name; bind it before the
# scheduler class shadows the garbled identifier `__UpperCamelCase`.
SdeVeOutput = __UpperCamelCase


class __UpperCamelCase(SchedulerMixin, ConfigMixin):
    """Score-based VE-SDE scheduler with ancestral predictor and Langevin corrector."""

    order = 1  # NOTE(review): restored from the garbled `lowercase : Tuple = 1`

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity: the VE-SDE formulation needs no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Set the continuous timesteps in (sampling_eps, 1], evenly spaced."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        """Set the geometric noise schedule used by the predictor/corrector."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(
            torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)
        )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete index (zero at the first index)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ):
        """One reverse-SDE predictor step (eq. 6 / eq. 47 of the paper)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ):
        """One Langevin corrector step, with step size tuned via the SNR."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse `original_samples` to the given discrete timesteps."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
676
0
"""Image processor (ConvNeXT-style resize/crop/rescale/normalize pipeline).

Fix notes: the garbling had given all four methods the same placeholder name
(so only the last survived) and redirected every local/`self.` assignment to
a dead placeholder. Names are restored from their read-sites.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class UpperCAmelCase(BaseImageProcessor):
    """Preprocesses images: shortest-edge resize (with crop_pct below 384),
    center crop, rescale and normalization."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize; below 384 px, resize the shortest edge to edge/crop_pct then center-crop."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the configured transforms over a batch of images and return a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
126
"""FizzBuzz as a string builder."""


def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`.

    Each token ("Fizz", "Buzz", "FizzBuzz" or the number itself) is followed
    by a single space, so the result ends with a trailing space.

    Args:
        number: the starting number (must be an int >= 1).
        iterations: the last number to play (must be an int >= 1).

    Returns:
        The space-separated FizzBuzz sequence.

    Raises:
        ValueError: on non-integer or out-of-range arguments.
    """
    # NOTE: the original had both parameters garbled to one duplicate name
    # (a SyntaxError) and a typo in the second message ("and integer").
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): snake_case = StableUnCLIPPipeline snake_case = TEXT_TO_IMAGE_PARAMS snake_case = TEXT_TO_IMAGE_BATCH_PARAMS snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false snake_case = False def __UpperCAmelCase ( self : Tuple ): lowerCamelCase__ = 32 lowerCamelCase__ = embedder_hidden_size # prior components torch.manual_seed(0 ) lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=SCREAMING_SNAKE_CASE_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCamelCase__ = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=SCREAMING_SNAKE_CASE_ , num_layers=1 , ) torch.manual_seed(0 ) lowerCamelCase__ = DDPMScheduler( 
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase__ = StableUnCLIPImageNormalizer(embedding_dim=SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowerCamelCase__ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCamelCase__ = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=SCREAMING_SNAKE_CASE_ , layers_per_block=1 , upcast_attention=SCREAMING_SNAKE_CASE_ , use_linear_projection=SCREAMING_SNAKE_CASE_ , ) torch.manual_seed(0 ) lowerCamelCase__ = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , ) torch.manual_seed(0 ) lowerCamelCase__ = AutoencoderKL() lowerCamelCase__ = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising 
components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=0 ): if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ): lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __UpperCAmelCase ( self : Optional[int] ): lowerCamelCase__ = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Optional[Any] ): lowerCamelCase__ = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __UpperCAmelCase ( self : Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Union[str, Any] ): lowerCamelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) lowerCamelCase__ = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCamelCase__ = pipe("""anime turle""" , 
generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" ) lowerCamelCase__ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Dict ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase__ = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) lowerCamelCase__ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
129
"""Utilities to shard, split, merge and shuffle `gen_kwargs` dicts.

List-valued entries of `gen_kwargs` are the data sources that can be
parallelized over ("shards"); every other value is broadcast as-is.

Fix notes: in the garbled original all five functions were named `a_`
(shadowing each other) while two of them called the helpers by their real
names, which were unbound. Real names are restored; `a_` is kept as a
backward-compatible alias for the binding the garbled module ended up with.
"""
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return how many shards the list-valued entries of `gen_kwargs` define.

    Raises:
        RuntimeError: if several lists of different lengths are found (it
            would be ambiguous which one to parallelize over).
    """
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    # A dict with no list still counts as one (unsplittable) shard.
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Partition shard indices 0..num_shards-1 into at most `max_num_jobs`
    contiguous ranges; earlier groups get the extra shards, empty groups are
    dropped."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split `gen_kwargs` into up to `max_num_jobs` dicts, slicing every
    list-valued entry and copying the rest."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of `_split_gen_kwargs`: concatenate list-valued entries, keep
    the first occurrence of everything else."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of `gen_kwargs` with every list-valued entry shuffled;
    lists of the same length are shuffled with the same permutation."""
    # Pre-draw one permutation per list size so equal-length lists stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs


# Backward-compatible alias: in the garbled original every function was named
# `a_`, so the module's public `a_` resolved to the last definition.
a_ = _shuffle_gen_kwargs
676
0
"""RAG tokenizer: wraps a question-encoder tokenizer and a generator tokenizer.

Fix notes: the garbling had named every method `UpperCAmelCase_` (so only the
last survived) and redirected the `self.` assignments to dead locals. Method
and attribute names are restored from their read-sites.
"""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class lowerCAmelCase_:
    """Holds two tokenizers and dispatches `__call__` to the current one
    (question encoder by default, generator for target texts)."""

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both tokenizers under dedicated subfolders of `save_directory`."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from their subfolders, resolving configs
        through `RagConfig` when none is provided."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ):
        """Deprecated helper that tokenizes sources and (optionally) targets
        in one call, attaching the target ids as `labels`."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
565
"""Preprocess a text dump into tokenized pickled data (distillation prep).

Fix notes: the garbling named the entry point `a_` while the `__main__` guard
calls `main()`, redirected every local assignment to a dead placeholder, and
used nonexistent dtypes (`np.uintaa`/`np.intaa`). Restored from read-sites.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Tokenize each line of `--file_path` and pickle the id sequences."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        # The special tokens are added manually, so disable them in `encode`.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Store the ids in the smallest integer dtype that can hold the vocab.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


# Backward-compatible alias for the garbled public name.
a_ = main


if __name__ == "__main__":
    main()
676
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class _lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" __magic_name__ :torch.FloatTensor class _lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" @register_to_config def __init__( self , __UpperCAmelCase = 1_6 , __UpperCAmelCase = 8_8 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = True , __UpperCAmelCase = True , ): '''simple docstring''' super().__init__() lowerCAmelCase__ :int = num_attention_heads lowerCAmelCase__ :Optional[int] = attention_head_dim lowerCAmelCase__ :Any = num_attention_heads * attention_head_dim lowerCAmelCase__ :List[str] = in_channels lowerCAmelCase__ :Tuple = torch.nn.GroupNorm(num_groups=__UpperCAmelCase , num_channels=__UpperCAmelCase , eps=1E-6 , affine=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) # 3. 
Define transformers blocks lowerCAmelCase__ :int = nn.ModuleList( [ BasicTransformerBlock( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dropout=__UpperCAmelCase , cross_attention_dim=__UpperCAmelCase , activation_fn=__UpperCAmelCase , attention_bias=__UpperCAmelCase , double_self_attention=__UpperCAmelCase , norm_elementwise_affine=__UpperCAmelCase , ) for d in range(__UpperCAmelCase ) ] ) lowerCAmelCase__ :Optional[int] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase = True , ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Dict = hidden_states.shape lowerCAmelCase__ :List[str] = batch_frames // num_frames lowerCAmelCase__ :List[str] = hidden_states lowerCAmelCase__ :Union[str, Any] = hidden_states[None, :].reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) lowerCAmelCase__ :int = self.norm(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.proj_in(__UpperCAmelCase ) # 2. Blocks for block in self.transformer_blocks: lowerCAmelCase__ :Union[str, Any] = block( __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase , cross_attention_kwargs=__UpperCAmelCase , class_labels=__UpperCAmelCase , ) # 3. 
Output lowerCAmelCase__ :Dict = self.proj_out(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = ( hidden_states[None, None, :] .reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) lowerCAmelCase__ :int = hidden_states.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=__UpperCAmelCase )
93
"""MVP model configuration.

Fix notes: the garbling renamed every parameter and redirected every `self.`
assignment to dead locals (so the config stored nothing); names are restored
from the documented MVP/BART config layout and the body's read-sites.
"""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for MVP seq2seq models (BART-style, plus prompt tuning)."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy flag handling: honor the old `force_bos_token_to_be_generated`
        # kwarg by forcing the BOS token when no forced BOS token is set.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
0
# TensorFlow benchmark CLI arguments (subclass of BenchmarkArguments).
# NOTE(review): identifiers are mangled — every dataclass field is named
# `lowerCamelCase_`, every local is `lowerCamelCase`, and every method is
# `_lowercase`; method bodies read names (`kwargs`, `positive_arg`, `tpu`,
# `strategy`) that no longer exist under the mangled assignments. The
# comments below describe the *intended* behavior; restoring names is a
# separate fix.
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


# TensorFlow is an optional dependency; only import when present.
if is_tf_available():
    import tensorflow as tf

SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)


@dataclass
class UpperCamelCase__ (lowerCamelCase__ ):
    '''simple docstring'''

    # Deprecated negated flags that map onto their positive counterparts.
    lowerCamelCase_ : List[Any] = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__( self , **UpperCamelCase__ ) -> str:
        # Translate deprecated `no_*` kwargs into the new positive flags
        # (inverting the boolean) and warn once per deprecated argument,
        # then pop the TF-specific kwargs before delegating to the base class.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                lowerCamelCase : Any = deprecated_arg[3:]  # strip the "no_" prefix
                lowerCamelCase : Dict = not kwargs.pop(UpperCamelCase__ )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}'''
                )
        lowerCamelCase : Union[str, Any] = kwargs.pop("tpu_name" , self.tpu_name )
        lowerCamelCase : List[Any] = kwargs.pop("device_idx" , self.device_idx )
        lowerCamelCase : Any = kwargs.pop("eager_mode" , self.eager_mode )
        lowerCamelCase : Any = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**UpperCamelCase__ )

    # Dataclass fields (in order): TPU name, device index, eager mode, XLA toggle.
    lowerCamelCase_ : str = field(
        default=lowerCamelCase__ ,
        metadata={"""help""": """Name of TPU"""} ,
    )
    lowerCamelCase_ : int = field(
        default=0 ,
        metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} ,
    )
    lowerCamelCase_ : bool = field(default=lowerCamelCase__ , metadata={"""help""": """Benchmark models in eager model."""} )
    lowerCamelCase_ : bool = field(
        default=lowerCamelCase__ ,
        metadata={
            """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
        } ,
    )

    @cached_property
    def _lowercase ( self ) -> List[str]:
        # Resolve a TPU cluster (by name, else auto-detect); None when unavailable.
        requires_backends(self , ["tf"] )
        lowerCamelCase : Optional[Any] = None
        if self.tpu:
            try:
                if self.tpu_name:
                    lowerCamelCase : Dict = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    lowerCamelCase : str = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                lowerCamelCase : Tuple = None
        return tpu

    @cached_property
    def _lowercase ( self ) -> Tuple:
        # Build the tf.distribute strategy: TPU if resolved, otherwise a
        # single-device GPU or CPU strategy.
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            lowerCamelCase : str = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                lowerCamelCase : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                lowerCamelCase : int = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy

    @property
    def _lowercase ( self ) -> Optional[int]:
        # True when a TPU cluster resolver was found.
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None

    @property
    def _lowercase ( self ) -> Dict:
        # The lazily-built distribution strategy.
        requires_backends(self , ["tf"] )
        return self._setup_strategy

    @property
    def _lowercase ( self ) -> Optional[Any]:
        # List of physical GPU devices visible to TensorFlow.
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )

    @property
    def _lowercase ( self ) -> Any:
        # Number of GPUs (0 when CUDA benchmarking is disabled).
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def _lowercase ( self ) -> Any:
        # Convenience flag: at least one GPU available.
        return self.n_gpu > 0
311
'''simple docstring'''
# SentencePiece-based tokenizer (BERT-for-seq-generation style).
# NOTE(review): identifiers are mangled — module constants are all `a_`,
# methods are `lowercase__`, parameters `lowerCAmelCase`, locals
# `lowerCamelCase_`; several bodies read names (`sp_model_kwargs`,
# `vocab_file`, `vocab`, `state`, `d`, `token`, `tokens`, `save_directory`,
# `filename_prefix`, `out_vocab_file`) that no longer exist under the
# mangled assignments. Restoring names is a separate fix.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


a_ : int = logging.get_logger(__name__)

# Expected on-disk vocabulary file name.
a_ : str = {"""vocab_file""": """spiece.model"""}

# Checkpoint name -> hosted sentencepiece model URL.
a_ : Optional[int] = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

# Maximum model input size per checkpoint.
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}


class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : Optional[int] =VOCAB_FILES_NAMES
    lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
    lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Prefix tokens prepended to every sequence (none here).
    lowercase : List[int] =[]
    lowercase : str =['input_ids', 'attention_mask']

    def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
        """simple docstring"""
        # Default the sentencepiece kwargs, register special tokens with the
        # base class, then load the sentencepiece model from `vocab_file`.
        lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )

        lowerCamelCase_ =vocab_file
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        # Vocabulary size as reported by the sentencepiece model.
        return self.sp_model.get_piece_size()

    def lowercase__ ( self ):
        """simple docstring"""
        # token string -> id map, including user-added tokens.
        lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """simple docstring"""
        # Drop the (unpicklable) sentencepiece processor before pickling.
        lowerCamelCase_ =self.__dict__.copy()
        lowerCamelCase_ =None
        return state

    def __setstate__( self, lowerCAmelCase ):
        """simple docstring"""
        # Restore attributes, then re-load the sentencepiece model from disk.
        lowerCamelCase_ =d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            lowerCamelCase_ ={}

        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Tokenize text into sentencepiece pieces.
        return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # token string -> id.
        return self.sp_model.piece_to_id(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # id -> token string.
        lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
        return token

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Join tokens back into a string, decoding runs of ordinary pieces via
        # sentencepiece while passing special tokens through verbatim.
        lowerCamelCase_ =[]
        lowerCamelCase_ =''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCAmelCase ) + token
                lowerCamelCase_ =[]
            else:
                current_sub_tokens.append(lowerCAmelCase )
        out_string += self.sp_model.decode(lowerCAmelCase )
        return out_string.strip()

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """simple docstring"""
        # Persist the sentencepiece model to `save_directory`, either by
        # copying the original file or serializing the in-memory model.
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase, '''wb''' ) as fi:
                lowerCamelCase_ =self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )

        return (out_vocab_file,)
676
0
'''simple docstring'''
from maths.prime_check import is_prime


def snake_case__(number: int) -> int:
    """Return the twin prime of ``number`` when one exists.

    A twin prime of ``number`` is ``number + 2`` where both ``number`` and
    ``number + 2`` are prime.

    Args:
        number: candidate lower member of a twin-prime pair.

    Returns:
        ``number + 2`` if (number, number + 2) is a twin-prime pair, else -1.

    Raises:
        TypeError: if ``number`` is not an int.
    """
    # Fix for mangled original: the parameter was named `_A` while the body
    # referenced `number` and `__snake_case`, raising NameError on every call.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    # Twin primes differ by exactly 2 and both members are prime.
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
370
'''simple docstring'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x`` from its coefficient sequence.

    ``poly[i]`` is the coefficient of ``x**i``. Straightforward term-by-term
    evaluation (recomputes ``x**i`` for every term).
    """
    # Fix for mangled original: both functions were named `a_` (the second
    # shadowed the first), parameters were duplicated (`__snake_case` twice,
    # a SyntaxError), and bodies referenced undefined `x`/`result`.
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme (O(n) multiplies)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


# Backward-compat alias: the mangled module previously bound its last
# definition (the Horner evaluator) to the name `a_`.
a_ = horner

if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
0
def heaps(arr: list) -> list:
    """Generate all permutations of ``arr`` using Heap's algorithm.

    Args:
        arr: list of elements to permute (permuted in place while recursing).

    Returns:
        List of all ``len(arr)!`` permutations, each as a tuple, in Heap order.
    """
    # Fix for mangled original: the function was named `__A` while the
    # __main__ block called `heaps`; the inner helper had duplicate parameter
    # names (SyntaxError) and the body referenced undefined `__snake_case`.
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        # Heap's algorithm: fix the last of the first k elements, recursing on
        # the prefix; the swap partner depends on the parity of k.
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


# Backward-compat alias for the previous (mangled) public name.
__A = heaps

if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
484
'''simple docstring'''
# CLIP-style processor pairing a CLIPImageProcessor with an XLM-Roberta
# tokenizer (AltCLIP-style).
# NOTE(review): identifiers are mangled — methods `lowercase__`, parameters
# `lowerCAmelCase`, locals `lowerCamelCase_`; bodies read names (`kwargs`,
# `image_processor`, `feature_extractor`, `tokenizer`, `text`, `images`,
# `encoding`, `image_features`) that no longer match the mangled targets.
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase ( lowerCamelCase__ ):
    # Attributes managed by ProcessorMixin and the expected component classes.
    lowercase : Optional[int] =['image_processor', 'tokenizer']
    lowercase : str ='CLIPImageProcessor'
    lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, then validate that both components are present.
        lowerCamelCase_ =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', lowerCAmelCase, )
            lowerCamelCase_ =kwargs.pop('''feature_extractor''' )

        lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(lowerCAmelCase, lowerCAmelCase )

    def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        # Tokenize `text` and/or preprocess `images`; when both are given,
        # attach pixel_values to the text encoding.
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )

        if images is not None:
            lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )

        if text is not None and images is not None:
            lowerCamelCase_ =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        lowerCamelCase_ =self.tokenizer.model_input_names
        lowerCamelCase_ =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
676
0
"""simple docstring"""
# Lazy-loading package __init__ for the ViT-MSN model: real submodules are
# imported only under TYPE_CHECKING; at runtime a _LazyModule defers imports
# until first attribute access.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Always-available configuration exports.
lowerCAmelCase_ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

# Modeling exports are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the structure above.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on demand.
    lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
560
'''simple docstring'''
# Image-to-text generation pipeline (image captioning / conditional captioning).
# NOTE(review): identifiers are mangled — methods `lowercase__`, parameters
# `lowerCAmelCase`, locals `lowerCamelCase_`; bodies read names (`prompt`,
# `generate_kwargs`, `max_new_tokens`, `forward_kwargs`, `model_inputs`,
# `model_outputs`, `records`, `input_ids`) that no longer match the mangled
# assignment targets. Comments describe the intended flow.
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

a_ : Optional[Any] = logging.get_logger(__name__)


@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Require the vision backend and restrict to vision-to-seq model classes.
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        requires_backends(self, '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
        """simple docstring"""
        # Split call kwargs into preprocess params (prompt) and forward params
        # (generate_kwargs / max_new_tokens); reject max_new_tokens given twice.
        lowerCamelCase_ ={}
        lowerCamelCase_ ={}

        if prompt is not None:
            lowerCamelCase_ =prompt
        if generate_kwargs is not None:
            lowerCamelCase_ =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowerCamelCase_ ={}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            lowerCamelCase_ =max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Public entry point; delegates to Pipeline.__call__.
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """simple docstring"""
        # Load the image and build model-specific inputs; when a prompt is
        # given, the handling differs per architecture (git / pix2struct /
        # vision-encoder-decoder).
        lowerCamelCase_ =load_image(lowerCAmelCase )

        if prompt is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )

            lowerCamelCase_ =self.model.config.model_type

            if model_type == "git":
                # git: prepend the CLS token id to the tokenized prompt.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
                lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
                lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )

            elif model_type == "pix2struct":
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
                model_inputs.update(lowerCAmelCase )

            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )

        else:
            lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            lowerCamelCase_ =None

        return model_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """simple docstring"""
        # Run generation. A batch of all-None input_ids (unconditional git) is
        # collapsed to None so `generate` treats it as absent.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            lowerCamelCase_ =None

        if generate_kwargs is None:
            lowerCamelCase_ ={}
        # FIXME: We need to pop here due to a difference in how `generation.py`
        # and `generation.tf_utils.py` parse inputs. In the Tensorflow version,
        # `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or
        # `self.model.encoder.main_input_name` in the `_prepare_model_inputs`
        # method.
        lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
        lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Decode each generated id sequence into {"generated_text": ...}.
        lowerCamelCase_ =[]
        for output_ids in model_outputs:
            lowerCamelCase_ ={
                '''generated_text''': self.tokenizer.decode(
                    lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
            }
            records.append(lowerCAmelCase )
        return records
676
0
from __future__ import annotations import numpy as np def __lowerCAmelCase ( A_ : list[float] ) -> str: return np.maximum(0 , __snake_case ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
221
'''simple docstring'''
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pre-trained architecture.
        pytorch_dump_path: destination file for the converted PyTorch weights.
    """
    # Fix for mangled original: the function was named `a_` with duplicate
    # `__snake_case` parameters (SyntaxError), and the __main__ block used the
    # undefined names `convert_tf_checkpoint_to_pytorch`, `parser` and `args`.
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compat alias for the previous (mangled) public name.
a_ = convert_tf_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
0
'''simple docstring'''
from __future__ import annotations

# Elementary charge in coulombs, used by the Hall-effect solver below.
ELECTRON_CHARGE = 1.6021e-19  # units = C

# Backward-compat alias for the previous (mangled) constant name.
_UpperCamelCase: float = ELECTRON_CHARGE


def snake_case(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve for whichever of (conductivity, electron_conc, mobility) is zero.

    Exactly one argument must be 0 (the unknown); the other two must be
    non-negative. Uses sigma = n * e * mu.

    Returns:
        (name_of_unknown, computed_value).

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    # Fix for mangled original: all three parameters were named `snake_case`
    # (duplicate parameter names — a SyntaxError) while the body referenced
    # `conductivity`/`electron_conc`/`mobility`, and the module constant was
    # renamed `_UpperCamelCase` while the body read `ELECTRON_CHARGE`.
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif conductivity < 0:
        raise ValueError('Conductivity cannot be negative')
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative')
    elif mobility < 0:
        raise ValueError('mobility cannot be negative')
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
284
'''simple docstring'''
# AltCLIP configuration classes: text config, vision config, and the combined
# config that merges the two (with legacy `*_config_dict` handling).
# NOTE(review): identifiers are mangled — all three classes are named
# `__UpperCamelCase`, class attrs `lowercase`, methods `lowercase__`,
# parameters `lowerCAmelCase`, locals `lowerCamelCase_`; bodies read names
# (`vocab_size`, `config_dict`, `text_config`, `_text_config_dict`, `output`,
# etc.) that no longer match the mangled targets. Comments describe intent.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Union[str, Any] = logging.get_logger(__name__)

a_ : Optional[int] = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class __UpperCamelCase ( lowerCamelCase__ ):
    # Text-tower configuration (XLM-Roberta-style encoder + projection).
    lowercase : Optional[Any] ='altclip_text_model'

    def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
        """simple docstring"""
        # Register special-token ids with the base config, then store the
        # transformer hyperparameters (names preserved on the RHS below).
        super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )

        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =position_embedding_type
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =project_dim


class __UpperCamelCase ( lowerCamelCase__ ):
    # Vision-tower configuration (CLIP-ViT-style encoder).
    lowercase : Dict ='altclip_vision_model'

    def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
        """simple docstring"""
        super().__init__(**lowerCAmelCase )

        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =hidden_act

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Load a vision config from a pretrained id; when the checkpoint is a
        # full AltCLIP config, extract its `vision_config` sub-dict and warn
        # on model_type mismatch.
        cls._set_token_in_kwargs(lowerCAmelCase )

        lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            lowerCamelCase_ =config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )


class __UpperCamelCase ( lowerCamelCase__ ):
    # Combined text+vision AltCLIP configuration.
    lowercase : Dict ='altclip'
    lowercase : str =True

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
        """simple docstring"""
        # Pop legacy `*_config_dict` kwargs before the base init, then merge
        # them into the corresponding `*_config` dicts with warnings on
        # conflicting keys.
        lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )

        super().__init__(**lowerCAmelCase )

        # Instead of simply assigning `[text|vision]_config_dict` to
        # `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in
        # `[text|vision]_config`. The values should be same in most cases, but
        # we don't want to break anything regarding `_config_dict` that
        # existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                lowerCamelCase_ ={}

            # This is the complete result when using `text_config_dict`.
            lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.''' )
                    logger.warning(lowerCAmelCase )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                lowerCamelCase_ ={}

            # This is the complete result when using `vision_config_dict`.
            lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                lowerCamelCase_ ={
                    str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.''' )
                    logger.warning(lowerCAmelCase )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            lowerCamelCase_ ={}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )

        if vision_config is None:
            lowerCamelCase_ ={}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )

        # Materialize the two sub-configs and the projection settings.
        lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
        lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )

        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =logit_scale_init_value
        lowerCamelCase_ =1.0

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Build a combined config from already-constructed sub-configs.
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )

    def lowercase__ ( self ):
        """simple docstring"""
        # Serialize to a plain dict, expanding the nested sub-configs.
        lowerCamelCase_ =copy.deepcopy(self.__dict__ )
        lowerCamelCase_ =self.text_config.to_dict()
        lowerCamelCase_ =self.vision_config.to_dict()
        lowerCamelCase_ =self.__class__.model_type
        return output
676
0
'''simple docstring'''
# Lazy-loading package __init__ for CLIP: registers exports per available
# backend (tokenizers / vision / torch / tf / flax) and defers real imports
# to a _LazyModule at runtime.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Always-available exports: configs, processor, slow tokenizer.
UpperCamelCase__ : int = {
    """configuration_clip""": [
        """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """CLIPConfig""",
        """CLIPOnnxConfig""",
        """CLIPTextConfig""",
        """CLIPVisionConfig""",
    ],
    """processing_clip""": ["""CLIPProcessor"""],
    """tokenization_clip""": ["""CLIPTokenizer"""],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : int = ["""CLIPTokenizerFast"""]

# Image processing requires the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Dict = ["""CLIPFeatureExtractor"""]
    UpperCamelCase__ : Union[str, Any] = ["""CLIPImageProcessor"""]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Tuple = [
        """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CLIPModel""",
        """CLIPPreTrainedModel""",
        """CLIPTextModel""",
        """CLIPTextModelWithProjection""",
        """CLIPVisionModel""",
        """CLIPVisionModelWithProjection""",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Any = [
        """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCLIPModel""",
        """TFCLIPPreTrainedModel""",
        """TFCLIPTextModel""",
        """TFCLIPVisionModel""",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : List[Any] = [
        """FlaxCLIPModel""",
        """FlaxCLIPPreTrainedModel""",
        """FlaxCLIPTextModel""",
        """FlaxCLIPTextPreTrainedModel""",
        """FlaxCLIPVisionModel""",
        """FlaxCLIPVisionPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the structure above.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on demand.
    UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
614
"""Tests for the PyTorch FlauBERT model.

Covers the configuration, every task head (LM, QA, classification, multiple
choice), TorchScript export across devices, and one slow integration check
against the published `flaubert/flaubert_base_cased` weights.
"""

import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds deliberately tiny Flaubert configs/inputs and runs per-head shape checks.

    `parent` is the `unittest.TestCase` driving the checks; every other argument is a
    small hyper-parameter chosen so the tests run in well under a second.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random token ids, masks, lengths and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            # Flaubert uses `langs` (language ids) where BERT uses token types.
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Build a small `FlaubertConfig` from the stored hyper-parameters."""
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Base model: accepts lengths/langs optionally, output shape is (B, S, H)."""
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """LM head: scalar loss and per-token vocabulary logits."""
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """SQuAD-style QA head: per-token start/end logits."""
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Beam-search QA head: with labels it returns a scalar loss; without, top-k spans."""
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Sequence classification head: scalar loss, one logit row per example."""
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Token classification head: logits per token per label."""
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Multiple-choice head: inputs are tiled per choice, logits are (B, num_choices)."""
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for `ModelTesterMixin`: config plus a kwargs dict for forward()."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # The beam-search QA head needs start/end positions when labels are requested.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, reload onto the test device, and run the traced model."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            # NOTE(review): `return` also skips any classes after it in
            # `all_model_classes`; harmless only while it stays last in the tuple.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Checks hidden states of the published base checkpoint against known values."""
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
676
0
"""Tests for the PyTorch BiT model: configuration, model/backbone/classification
heads, initialization and hidden-state shapes, plus a slow integration check."""

import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    """Builds tiny BiT configs/inputs and runs the per-head checks.

    `parent` is the driving `unittest.TestCase`; the list defaults are shared but
    never mutated, so the mutable-default smell is benign here.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`.
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return a config plus random pixel values and (optionally) labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small `BitConfig` from the stored hyper-parameters."""
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model: final feature map is (B, C_last, H/32, W/32)."""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head: one logit row per example."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Backbone: feature maps and channel counts follow `out_features`; with
        `out_features=None` only the last stage is returned."""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Adapter for `ModelTesterMixin`: config plus a kwargs dict for forward()."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common text-model properties do not apply to BiT.
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    # Norm layers must start as identity: weight 1, bias 0.
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the shared COCO test fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """Checks the published checkpoint's logits on the COCO fixture image."""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
126
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ 
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if 
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ 
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
0
"""Pipeline for visual question answering: answer a free-form question about an image."""
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

__magic_name__ = logging.get_logger(__name__)


# NOTE(review): the class name below is an obfuscation artifact of this dump (upstream the
# class is `VisualQuestionAnsweringPipeline`). The name is kept so existing references
# still resolve; the method names are restored because the `Pipeline` base class invokes
# `_sanitize_parameters` / `preprocess` / `_forward` / `postprocess` by name.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SCREAMING_SNAKE_CASE__(Pipeline):
    """Answer open-ended questions about images.

    Accepts an image (PIL image, URL or local path) plus a question string, or a dict
    (or list of dicts) with ``"image"`` and ``"question"`` keys.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only models registered for visual question answering may drive this pipeline.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split user kwargs into the per-stage dicts (preprocess, forward, postprocess)."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        """Run the pipeline.

        ``image`` may be a single image together with ``question``, or an already-packed
        dict / list of dicts carrying both keys.
        """
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Caller supplied pre-packed {"image": ..., "question": ...} input(s).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and featurize the image into one model-input dict."""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the ``top_k`` ``{"score", "answer"}`` dicts, scored with a per-label sigmoid."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        # `idalabel` in the dump is the mangled `id2label` mapping on the model config.
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
129
"""Interleave the characters of two strings."""


def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return the characters of *first_str* and *second_str* alternated one by one.

    When one string is longer, its remaining tail is appended unchanged.

    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Walk up to the longer string's length so the leftover tail is still emitted.
    output_list: list[str] = []
    for char_count in range(max(first_str_length, second_str_length)):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


# Backward-compatible alias: the dump exposed this function under the mangled name `a_`.
a_ = alternative_string_arrange

if __name__ == "__main__":
    print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
0
"""Convert LeViT checkpoints from the timm library to the Hugging Face format."""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """Copy timm LeViT weights into a HF model, sanity-check the logits, optionally save.

    Args:
        hidden_sizes: first-stage hidden size, used to select the timm architecture.
        name: checkpoint name, e.g. ``"levit-128S"``.
        config: ``LevitConfig`` for the target HF model.
        save_directory: ``Path`` where the converted checkpoint is written.
        push_to_hub: when True, save model + image processor under ``save_directory / name``.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        # Pick the matching timm architecture; only levit-128 has an "S" (small) variant.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()

        # Both state dicts enumerate parameters in the same order, so copy by position.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: the two models must produce identical logits on random input.
        x = torch.randn((2, 3, 224, 224))
        original_logits = from_model(x)
        our_logits = our_model(x).logits

    assert torch.allclose(original_logits, our_logits), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one named LeViT checkpoint, or every known one when *model_name* is None."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        # Bind `config` here too so the trailing `return config, ...` cannot raise NameError.
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
565
"""Lazy import structure for the TimmBackbone model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public symbols; consumed lazily by _LazyModule below.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The model class itself is only importable when torch is installed.
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy imports only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
0
"""Benchmark reading rows from a `datasets.Dataset`: per-row vs. per-batch, raw vs. formatted."""
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Read `length` single rows sequentially."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the whole dataset in slices of `batch_size` rows."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Read `length` single rows with an output format (numpy/pandas/torch/...) applied."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Read `length` rows in `batch_size` slices with an output format applied."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time every read pattern before and after shuffling, dump JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # NOTE(review): result-key format reconstructed from the upstream datasets
            # benchmark ("<func> <kwarg values>") — verify against the results consumer.
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
93
"""Levenshtein edit distance via memoized recursion."""
import functools


def a_(worda: str, wordb: str) -> int:
    """Return the minimum number of single-character insertions, deletions and
    substitutions needed to turn *worda* into *wordb*.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: insert everything remaining in the second word.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete everything remaining in the first word.
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # 0 when current letters match
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from the first word
            1 + min_distance(indexa, indexb + 1),  # insert into the first word
            diff + min_distance(indexa + 1, indexb + 1),  # substitute (or keep)
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Solve the n-queens puzzle with depth-first search over partial column placements."""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Extend the partial placement by one row, collecting full solutions in *boards*.

    ``possible_board[r]`` is the column of the queen in row ``r``. A queen at
    (row, col) occupies the 45-degree diagonal ``row - col`` and the 135-degree
    diagonal ``row + col``; those values are tracked in the two collision lists.
    """
    row = len(possible_board)
    # Every row holds a queen: render the board as strings and record the solution.
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    for col in range(n):
        # Skip columns attacked vertically or along either diagonal.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Print every solution of the n-queens puzzle and the solution count."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
311
"""Check whether an integer is an automorphic number (its square ends in the number itself)."""


def a_(number: int) -> bool:
    """Return True when *number* is automorphic, e.g. 5 -> 25, 76 -> 5776.

    Raises:
        TypeError: if *number* is not an ``int``.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    # Negative numbers can never match the trailing digits of their square.
    if number < 0:
        return False
    number_square = number * number
    # Compare the numbers digit by digit from the right.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Fast (Rust-backed) tokenizer for CodeGen models."""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2_0_4_8,
}


# NOTE(review): the class name `a__` is an obfuscation artifact of this dump (upstream:
# `CodeGenTokenizerFast`); it is kept so existing references keep resolving. The class
# attributes and hook-method names are restored because the tokenizer machinery looks
# them up by name.
class a__(PreTrainedTokenizerFast):
    """Byte-level BPE tokenizer for CodeGen, backed by HuggingFace `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # The fast backend cannot add a BOS token; point users at the slow class.
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Keep the backend pre-tokenizer's add_prefix_space in sync with the argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating at the first match of any regex pattern."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut a generated completion at the second ``print``/``def`` or the earliest
        match of any of the caller-supplied regex patterns."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
370
"""Backtracking sudoku solver over 9x9 grids (0 marks an empty cell)."""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True when digit *n* may be placed at (row, column): it must not
    already appear in that row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # (row - row % 3, column - column % 3) is the top-left corner of the 3x3 box.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell, or None when the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve *grid* in place by backtracking; return it solved, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Dead end: undo the tentative placement and try the next digit.
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid for example
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
0
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __A = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def __A ( _lowercase ): '''simple docstring''' _A = {} state_dict.pop('''pixel_mean''' , __snake_case ) state_dict.pop('''pixel_std''' , __snake_case ) _A = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _A = key.replace(__snake_case , __snake_case ) if re.match(__snake_case , __snake_case ): _A = int(re.match(__snake_case , __snake_case ).group(2 ) ) if layer_nb == 0: _A = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: _A = key.replace('''layers.1''' , 
'''layers.0''' ) elif layer_nb == 2: _A = key.replace('''layers.2''' , '''proj_out''' ) _A = value _A = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def __A ( _lowercase , _lowercase , _lowercase , _lowercase="ybelkada/segment-anything" ): '''simple docstring''' _A = hf_hub_download(__snake_case , f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: _A = SamConfig() elif "sam_vit_l" in model_name: _A = SamVisionConfig( hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) _A = SamConfig( vision_config=__snake_case , ) elif "sam_vit_h" in model_name: _A = SamVisionConfig( hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) _A = SamConfig( vision_config=__snake_case , ) _A = torch.load(__snake_case , map_location='''cpu''' ) _A = replace_keys(__snake_case ) _A = SamImageProcessor() _A = SamProcessor(image_processor=__snake_case ) _A = SamModel(__snake_case ) hf_model.load_state_dict(__snake_case ) _A = hf_model.to('''cuda''' ) _A = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' _A = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' ) _A = [[[4_00, 6_50]]] _A = [[1]] _A = processor(images=np.array(__snake_case ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): _A = hf_model(**__snake_case ) _A = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 _A = processor( images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): _A = hf_model(**__snake_case ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 _A = ((75, 2_75, 17_25, 8_50),) _A = processor(images=np.array(__snake_case ) , input_boxes=__snake_case , 
return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): _A = hf_model(**__snake_case ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. _A = [[[4_00, 6_50], [8_00, 6_50]]] _A = [[1, 1]] _A = processor( images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): _A = hf_model(**__snake_case ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": __A = argparse.ArgumentParser() __A = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) __A = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
484
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Tuple = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] ='informer' lowercase : Union[str, Any] ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =prediction_length lowerCamelCase_ =context_length or prediction_length lowerCamelCase_ =distribution_output lowerCamelCase_ =loss lowerCamelCase_ =input_size lowerCamelCase_ =num_time_features lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowerCamelCase_ =scaling lowerCamelCase_ =num_dynamic_real_features lowerCamelCase_ =num_static_real_features lowerCamelCase_ =num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if 
len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =cardinality else: lowerCamelCase_ =[0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =embedding_dimension else: lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase_ =num_parallel_samples # Transformer architecture configuration lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features lowerCamelCase_ =d_model lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =decoder_layers lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =use_cache # Informer lowerCamelCase_ =attention_type lowerCamelCase_ =sampling_factor lowerCamelCase_ =distil super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
676
0
"""simple docstring""" import functools def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int: lowercase__ : List[str] = len(__snake_case ) lowercase__ : Optional[Any] = len(__snake_case ) @functools.cache def min_distance(__lowerCamelCase , __lowerCamelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowercase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
560
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
0
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging a_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCAmelCase__ ( lowerCamelCase__ ): """simple docstring""" def __init__( self: Union[str, Any] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Any , __lowerCAmelCase: Any , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: str , __lowerCAmelCase: Union[str, Any] , ) -> Tuple: '''simple docstring''' super().__init__() if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( speech_model=__lowerCAmelCase , speech_processor=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , ) def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Optional[int] = "auto" ) -> Optional[int]: '''simple docstring''' if slice_size == "auto": __UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCAmelCase ) def _UpperCAmelCase ( self: str ) -> Optional[Any]: '''simple docstring''' self.enable_attention_slicing(__lowerCAmelCase ) @torch.no_grad() def __call__( self: List[Any] , __lowerCAmelCase: Dict , __lowerCAmelCase: Any=16_000 , __lowerCAmelCase: Any = 512 , __lowerCAmelCase: Tuple = 512 , __lowerCAmelCase: Any = 50 , __lowerCAmelCase: List[Any] = 7.5 , __lowerCAmelCase: Tuple = None , __lowerCAmelCase: Optional[int] = 1 , __lowerCAmelCase: List[Any] = 0.0 , __lowerCAmelCase: int = None , __lowerCAmelCase: str = None , __lowerCAmelCase: Dict = "pil" , __lowerCAmelCase: Dict = True , __lowerCAmelCase: Tuple = None , __lowerCAmelCase: Any = 1 , **__lowerCAmelCase: Optional[Any] , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase = self.speech_processor.feature_extractor( __lowerCAmelCase , return_tensors="pt" , sampling_rate=__lowerCAmelCase ).input_features.to(self.device ) __UpperCAmelCase = self.speech_model.generate(__lowerCAmelCase , max_length=480_000 ) __UpperCAmelCase = self.speech_processor.tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , normalize=__lowerCAmelCase )[ 0 ] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __UpperCAmelCase = 1 elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): __UpperCAmelCase = len(__lowerCAmelCase ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}''' ) if height % 8 != 0 or width % 8 != 0: raise 
ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(__lowerCAmelCase )}.''' ) # get prompt text embeddings __UpperCAmelCase = self.tokenizer( __lowerCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) __UpperCAmelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) __UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length] __UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = text_embeddings.shape __UpperCAmelCase = text_embeddings.repeat(1 , __lowerCAmelCase , 1 ) __UpperCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCAmelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__UpperCAmelCase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __UpperCAmelCase = 42 if negative_prompt is None: __UpperCAmelCase = [""] * batch_size elif type(__lowerCAmelCase ) is not type(__lowerCAmelCase ): raise TypeError( F'''`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCAmelCase )} !=''' F''' {type(__lowerCAmelCase )}.''' ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): __UpperCAmelCase = [negative_prompt] elif batch_size != len(__lowerCAmelCase ): raise ValueError( F'''`negative_prompt`: {negative_prompt} has batch size {len(__lowerCAmelCase )}, but `prompt`:''' F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: __UpperCAmelCase = negative_prompt __UpperCAmelCase = text_input_ids.shape[-1] __UpperCAmelCase = self.tokenizer( __lowerCAmelCase , padding="max_length" , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="pt" , ) __UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __UpperCAmelCase = uncond_embeddings.shape[1] __UpperCAmelCase = uncond_embeddings.repeat(1 , __lowerCAmelCase , 1 ) __UpperCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__UpperCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __UpperCAmelCase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __UpperCAmelCase = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device="cpu" , dtype=__lowerCAmelCase ).to( self.device ) else: __UpperCAmelCase = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) __UpperCAmelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__lowerCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __UpperCAmelCase = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __UpperCAmelCase = {} if accepts_eta: __UpperCAmelCase = eta for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance __UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __UpperCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # predict the noise residual __UpperCAmelCase = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ).sample # perform guidance if do_classifier_free_guidance: __UpperCAmelCase , __UpperCAmelCase = noise_pred.chunk(2 ) __UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __UpperCAmelCase = 1 / 0.18215 * latents __UpperCAmelCase = self.vae.decode(__lowerCAmelCase ).sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__lowerCAmelCase , nsfw_content_detected=__lowerCAmelCase )
221
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase ) @torch.no_grad() def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ): """simple docstring""" if isinstance(self.unet.config.sample_size, lowerCAmelCase ): lowerCamelCase_ =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase_ =self.scheduler.step( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase )
676
0
'''simple docstring''' import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def snake_case ( snake_case : Dataset , snake_case : Dict[str, str] ) -> Optional[int]: """simple docstring""" lowerCAmelCase = args.log_outputs lowerCAmelCase = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric lowerCAmelCase = load_metric('wer' ) lowerCAmelCase = load_metric('cer' ) # compute metrics lowerCAmelCase = wer.compute(references=result['target'] , predictions=result['prediction'] ) lowerCAmelCase = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results lowerCAmelCase = F'WER: {wer_result}\nCER: {cer_result}' print(__snake_case ) with open(F'{dataset_id}_eval_results.txt' , 'w' ) as f: f.write(__snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCAmelCase = F'log_{dataset_id}_predictions.txt' lowerCAmelCase = F'log_{dataset_id}_targets.txt' with open(__snake_case , 'w' ) as p, open(__snake_case , 'w' ) as t: # mapping function to write output def write_to_file(snake_case : Union[str, Any] , snake_case : Tuple ): p.write(F'{i}' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(F'{i}' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__snake_case , with_indices=__snake_case ) def snake_case ( snake_case : str ) -> str: """simple docstring""" lowerCAmelCase = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCAmelCase = re.sub(__snake_case , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
lowerCAmelCase = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: lowerCAmelCase = ' '.join(text.split(__snake_case ) ) return text def snake_case ( snake_case : Dict ) -> Optional[int]: """simple docstring""" lowerCAmelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCAmelCase = AutoFeatureExtractor.from_pretrained(args.model_id ) lowerCAmelCase = feature_extractor.sampling_rate # resample audio lowerCAmelCase = dataset.cast_column('audio' , Audio(sampling_rate=__snake_case ) ) # load eval pipeline if args.device is None: lowerCAmelCase = 0 if torch.cuda.is_available() else -1 lowerCAmelCase = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Tuple ): lowerCAmelCase = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowerCAmelCase = prediction['text'] lowerCAmelCase = normalize_text(batch['sentence'] ) return batch # run inference on all examples lowerCAmelCase = dataset.map(__snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__snake_case , __snake_case ) if __name__ == "__main__": _UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. 
*E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) _UpperCamelCase : Optional[Any] = parser.parse_args() main(args)
284
'''simple docstring''' from maths.prime_check import is_prime def a_ ( __snake_case : int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
676
0
"""SentencePiece tokenizer for the BertGeneration model."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class _lowercase(PreTrainedTokenizer):
    """SentencePiece-based tokenizer.

    Wraps a `sentencepiece` model file (`spiece.model`); tokenization, id lookup
    and detokenization are all delegated to the SentencePiece processor.
    """

    # Class attributes consumed by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model from `vocab_file` and register special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take a string and return a list of subword strings (SentencePiece pieces)."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) back to a token (str) using the SentencePiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (pieces) back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # vocab file no longer on disk (e.g. unpickled tokenizer) — dump the
            # serialized model held in memory instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
614
"""Variance-exploding (VE) SDE predictor-corrector scheduler."""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of a predictor step.

    prev_sample: sample at the previous timestep (x_{t-1}).
    prev_sample_mean: the noise-free mean of `prev_sample`.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class __UpperCamelCase(SchedulerMixin, ConfigMixin):
    """Predictor-corrector sampler for the variance-exploding SDE (Song et al.)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # VE SDE does not rescale the model input.
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Set the (continuous) timesteps used for the diffusion chain, from 1 down to sampling_eps."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise schedule sigma(t) = sigma_min * (sigma_max/sigma_min)**t."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # discrete sigmas are log-uniformly spaced between sigma_min and sigma_max
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero for the very first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the reverse SDE (equation 6 of the paper)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: Langevin-dynamics refinement of the current sample."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse clean samples to the noise level of `timesteps` (for training)."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
676
0
"""Image processor: resize to a multiple of `size_divisor` and rescale to [0, 1]."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCAmelCase(BaseImageProcessor):
    """Constructs the image processor.

    Images are (optionally) resized so both dimensions are multiples of
    `size_divisor`, then (optionally) rescaled by 1/255.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        """Resize `image`, rounding both sides DOWN to the nearest multiple of `size_divisor`."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; per-call arguments override the defaults set in __init__."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
126
def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz.

    Returns the space-separated FizzBuzz sequence starting at `number` and running
    up to and including `iterations` (a trailing space follows the last entry).

    Raises ValueError if either argument is not an int, or if `number < 1`,
    or if `iterations < 1`.
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be and integer and be more than 0"
        )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # neither multiple of 3 nor of 5: emit the number itself
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""simple docstring""" import itertools import os import re __magic_name__ = re.compile(R"""([A-Z]+)([A-Z][a-z])""") __magic_name__ = re.compile(R"""([a-z\d])([A-Z])""") __magic_name__ = re.compile(R"""(?<!_)_(?!_)""") __magic_name__ = re.compile(R"""(_{2,})""") __magic_name__ = R"""^\w+(\.\w+)*$""" __magic_name__ = R"""<>:/\|?*""" def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = _uppercase_uppercase_re.sub(r"""\1_\2""" , __snake_case ) lowerCamelCase__ = _lowercase_uppercase_re.sub(r"""\1_\2""" , __snake_case ) return name.lower() def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ = _single_underscore_re.split(__snake_case ) lowerCamelCase__ = [_multiple_underscores_re.split(__snake_case ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__snake_case ) if n != """""" ) def _A ( __lowercase ): """simple docstring""" if os.path.basename(__snake_case ) != name: raise ValueError(f"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(__snake_case ) def _A ( __lowercase , __lowercase ): """simple docstring""" if os.path.basename(__snake_case ) != name: raise ValueError(f"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , __snake_case ): raise ValueError(f"""Split name should match \'{_split_re}\'\' but got \'{split}\'.""" ) return f"""{filename_prefix_for_name(__snake_case )}-{split}""" def _A ( __lowercase , __lowercase , __lowercase , __lowercase=None ): """simple docstring""" lowerCamelCase__ = filename_prefix_for_split(__snake_case , __snake_case ) if filetype_suffix: prefix += f""".{filetype_suffix}""" lowerCamelCase__ = os.path.join(__snake_case , __snake_case ) return f"""{filepath}*""" def _A ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None ): """simple docstring""" lowerCamelCase__ = filename_prefix_for_split(__snake_case , __snake_case ) lowerCamelCase__ = os.path.join(__snake_case , __snake_case ) if 
shard_lengths: lowerCamelCase__ = len(__snake_case ) lowerCamelCase__ = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(__snake_case )] if filetype_suffix: lowerCamelCase__ = [filename + f""".{filetype_suffix}""" for filename in filenames] return filenames else: lowerCamelCase__ = prefix if filetype_suffix: filename += f""".{filetype_suffix}""" return [filename]
129
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
0
"""Tests for the PyTorch ViTMSN model."""
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds tiny ViTMSN configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for ViTMSN."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
565
"""
Preprocessing script before distillation: tokenize a raw text dump once and
pickle the resulting token-id arrays (binarization) so training never
re-tokenizes.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    # Each tokenizer family uses different sentence-boundary special tokens.
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        # Special tokens are added manually, so disable automatic insertion.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 halves the dump size when the vocab fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
676
0
"""simple docstring""" from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __A = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCAmelCase ( lowerCamelCase__ ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' super().__init__() if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1: lowerCAmelCase__ :Dict = ( F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " 'to update the config accordingly as leaving `steps_offset` might led to incorrect results' ' in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub,' ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`' ' file' ) deprecate('steps_offset!=1' , '1.0.0' , __UpperCAmelCase , standard_warn=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = dict(scheduler.config ) lowerCAmelCase__ :Optional[int] = 1 lowerCAmelCase__ :List[Any] = FrozenDict(__UpperCAmelCase ) if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False: lowerCAmelCase__ :Tuple = ( F"The configuration file of this scheduler: {scheduler} has not set the configuration" ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make' ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to' ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face' ' Hub, it would be very nice if you could open a Pull request for the' ' `scheduler/scheduler_config.json` file' ) deprecate('skip_prk_steps not set' , '1.0.0' , __UpperCAmelCase , standard_warn=__UpperCAmelCase ) lowerCAmelCase__ :List[str] = dict(scheduler.config ) lowerCAmelCase__ :int = True lowerCAmelCase__ :List[str] = FrozenDict(__UpperCAmelCase ) if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' 
) self.register_modules( segmentation_model=__UpperCAmelCase , segmentation_processor=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , ) def snake_case ( self , __UpperCAmelCase = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase__ :List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' self.enable_attention_slicing(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowerCAmelCase__ :Tuple = torch.device('cuda' ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__UpperCAmelCase , __UpperCAmelCase ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case ( self ): '''simple docstring''' if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCAmelCase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_1_2 , __UpperCAmelCase = 5_0 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , 
__UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Dict = self.segmentation_processor( text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device ) lowerCAmelCase__ :Union[str, Any] = self.segmentation_model(**__UpperCAmelCase ) lowerCAmelCase__ :Tuple = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() lowerCAmelCase__ :List[Any] = self.numpy_to_pil(__UpperCAmelCase )[0].resize(image.size ) # Run inpainting pipeline with the generated mask lowerCAmelCase__ :Optional[Any] = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , height=__UpperCAmelCase , width=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , output_type=__UpperCAmelCase , return_dict=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=__UpperCAmelCase , )
93
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for the MVP encoder-decoder model.

    Stores the architecture hyper-parameters (layer/head counts, dims, dropouts)
    and the prompt-tuning options (`use_prompt`, `prompt_length`, `prompt_mid_dim`).
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Backward compatibility: old configs used `force_bos_token_to_be_generated`.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE__ : List[str] = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
311
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : str = {"""vocab_file""": """spiece.model"""} a_ : Optional[int] = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } a_ : List[Any] = {"""bert_for_seq_generation""": 5_12} class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[int] =[] lowercase : str =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, ) lowerCamelCase_ =vocab_file lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.sp_model.get_piece_size() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =None return state def __setstate__( self, lowerCAmelCase ): """simple docstring""" 
lowerCamelCase_ =d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): lowerCamelCase_ ={} lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase ) return token def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ ='''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token lowerCamelCase_ =[] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase, '''wb''' ) as fi: lowerCamelCase_ =self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
676
0
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name''']) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv''']) @pytest.mark.parametrize('''revision''' , [None, '''v2''']) def lowerCamelCase (a_ :Tuple , a_ :Optional[int] , a_ :Optional[int]) -> Union[str, Any]: lowercase :int = hf_hub_url(repo_id=a_ , path=a_ , revision=a_) assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a_)}"""
677
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger() @dataclass class __magic_name__ : __A : nn.Module __A : List[nn.Module] = field(default_factory=__UpperCAmelCase ) __A : list = field(default_factory=__UpperCAmelCase ) def __snake_case ( self : List[str] , snake_case__ : List[str] , snake_case__ : Tensor , snake_case__ : Tensor ): '''simple docstring''' lowercase :List[str] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case__ ) def __call__( self : int , snake_case__ : Tensor ): '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case__ ) [x.remove() for x in self.handles] return self @property def __snake_case ( self : int ): '''simple docstring''' return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __magic_name__ : __A : nn.Module __A : nn.Module __A : int = 0 __A : List = field(default_factory=__UpperCAmelCase ) __A : List = field(default_factory=__UpperCAmelCase ) def __call__( self : Dict , snake_case__ : Tensor ): '''simple docstring''' lowercase :Dict = Tracker(self.dest )(snake_case__ ).parametrized lowercase :Optional[Any] = Tracker(self.src )(snake_case__ ).parametrized lowercase :List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) ) lowercase :Tuple = list(filter(lambda snake_case__ : type(snake_case__ ) not in 
self.dest_skip , snake_case__ ) ) if len(snake_case__ ) != len(snake_case__ ): raise Exception( f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while""" f""" destination module has {len(snake_case__ )}.""" ) for dest_m, src_m in zip(snake_case__ , snake_case__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) def lowerCamelCase (a_ :str , a_ :ResNetConfig , a_ :Path , a_ :bool = True) -> Optional[Any]: print(F"""Converting {name}...""") with torch.no_grad(): lowercase :Union[str, Any] = timm.create_model(a_ , pretrained=a_).eval() lowercase :Tuple = ResNetForImageClassification(a_).eval() lowercase :int = ModuleTransfer(src=a_ , dest=a_) lowercase :List[Any] = torch.randn((1, 3, 224, 224)) module_transfer(a_) assert torch.allclose(from_model(a_) , our_model(a_).logits), "The model logits don't match the original one." lowercase :List[Any] = F"""resnet{'-'.join(name.split('resnet'))}""" print(a_) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a_ , ) # we can use the convnext one lowercase :Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''') image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a_ , ) print(F"""Pushed {checkpoint_name}""") def lowerCamelCase (a_ :Path , a_ :str = None , a_ :bool = True) -> int: lowercase :Optional[Any] = '''imagenet-1k-id2label.json''' lowercase :Union[str, Any] = 1000 lowercase :Any = (1, num_labels) lowercase :Tuple = '''huggingface/label-files''' lowercase :List[str] = num_labels lowercase :Union[str, Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r''')) lowercase :Any = {int(a_): v for k, v in idalabel.items()} lowercase :str = idalabel lowercase :Any = {v: k for k, v in 
idalabel.items()} lowercase :Union[str, Any] = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_) lowercase :Optional[int] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''), } if model_name: convert_weight_and_push(a_ , names_to_config[model_name] , a_ , a_) else: for model_name, config in names_to_config.items(): convert_weight_and_push(a_ , a_ , a_ , a_) return config, expected_shape if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. 
If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
677
1
"""simple docstring""" # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''') class __magic_name__ : def __init__( self : List[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : bool = True , snake_case__ : bool = False ): '''simple docstring''' lowercase :str = scheduler lowercase :List[Any] = optimizers if isinstance(snake_case__ , (list, tuple) ) else [optimizers] lowercase :Optional[Any] = split_batches lowercase :Optional[int] = step_with_optimizer lowercase :List[str] = GradientState() def __snake_case ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ): '''simple docstring''' if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*snake_case__ , **snake_case__ ) return # Otherwise, first make sure the optimizer was stepped. 
if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*snake_case__ , **snake_case__ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step lowercase :Any = AcceleratorState().num_processes for _ in range(snake_case__ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , '''total_steps''' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*snake_case__ , **snake_case__ ) else: self.scheduler.step(*snake_case__ , **snake_case__ ) def __snake_case ( self : Dict ): '''simple docstring''' return self.scheduler.get_last_lr() def __snake_case ( self : Tuple ): '''simple docstring''' return self.scheduler.state_dict() def __snake_case ( self : str , snake_case__ : List[Any] ): '''simple docstring''' self.scheduler.load_state_dict(snake_case__ ) def __snake_case ( self : List[str] ): '''simple docstring''' return self.scheduler.get_lr() def __snake_case ( self : Any , *snake_case__ : Dict , **snake_case__ : Dict ): '''simple docstring''' return self.scheduler.print_lr(*snake_case__ , **snake_case__ )
677
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __A : Any = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __A : List[Any] = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __A : List[str] = False __A : int = False def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : int=False ): '''simple docstring''' lowercase :Union[str, Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): lowercase :Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
return inputs_dict class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Any , snake_case__ : Dict , snake_case__ : Dict=1_3 , snake_case__ : Tuple=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=9_9 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Any=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : int=None , ): '''simple docstring''' lowercase :Tuple = parent lowercase :Tuple = batch_size lowercase :Optional[Any] = seq_length lowercase :Optional[Any] = is_training lowercase :Optional[Any] = use_input_mask lowercase :List[Any] = use_token_type_ids lowercase :str = use_labels lowercase :List[str] = vocab_size lowercase :str = hidden_size lowercase :Optional[int] = num_hidden_layers lowercase :Dict = num_attention_heads lowercase :Any = intermediate_size lowercase :List[str] = hidden_act lowercase :Optional[Any] = hidden_dropout_prob lowercase :List[Any] = attention_probs_dropout_prob lowercase :List[Any] = max_position_embeddings lowercase :List[Any] = type_vocab_size lowercase :Union[str, Any] = type_sequence_label_size lowercase :Union[str, Any] = initializer_range lowercase :Any = num_labels lowercase :int = num_choices lowercase :Dict = scope lowercase :Dict = embedding_size def __snake_case ( self : Tuple ): '''simple docstring''' lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase :int = None if self.use_input_mask: lowercase :int = random_attention_mask([self.batch_size, self.seq_length] ) lowercase :Tuple = None if 
self.use_token_type_ids: lowercase :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase :Union[str, Any] = None lowercase :int = None lowercase :str = None if self.use_labels: lowercase :int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase :Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase :Optional[int] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ): '''simple docstring''' lowercase :Dict = TFMobileBertModel(config=snake_case__ ) lowercase :Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :List[Any] = model(snake_case__ ) lowercase :Optional[int] = [input_ids, input_mask] lowercase :Optional[int] = model(snake_case__ ) lowercase :Union[str, Any] = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __snake_case ( self : List[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , 
snake_case__ : int , snake_case__ : str , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[int] ): '''simple docstring''' lowercase :Any = TFMobileBertForMaskedLM(config=snake_case__ ) lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :int = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ): '''simple docstring''' lowercase :Optional[Any] = TFMobileBertForNextSentencePrediction(config=snake_case__ ) lowercase :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :Optional[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __snake_case ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' lowercase :int = TFMobileBertForPreTraining(config=snake_case__ ) lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :List[Any] = model(snake_case__ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __snake_case ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' lowercase :List[Any] = self.num_labels lowercase :List[Any] = 
TFMobileBertForSequenceClassification(config=snake_case__ ) lowercase :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ): '''simple docstring''' lowercase :Tuple = self.num_choices lowercase :Any = TFMobileBertForMultipleChoice(config=snake_case__ ) lowercase :Any = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) lowercase :Union[str, Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) lowercase :List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) lowercase :Dict = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } lowercase :Optional[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __snake_case ( self : Any , snake_case__ : str , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict ): '''simple docstring''' lowercase :List[Any] = self.num_labels lowercase :List[str] = TFMobileBertForTokenClassification(config=snake_case__ ) lowercase :int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :int = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any] , 
snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str ): '''simple docstring''' lowercase :Union[str, Any] = TFMobileBertForQuestionAnswering(config=snake_case__ ) lowercase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} lowercase :str = model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase :Dict = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) :Dict = config_and_inputs lowercase :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def __snake_case ( self : Optional[Any] ): '''simple docstring''' lowercase :List[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self ) lowercase :List[str] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __snake_case ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __snake_case ( self : Union[str, Any] ): '''simple docstring''' lowercase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case__ ) def __snake_case ( self : Any ): '''simple docstring''' lowercase :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ ) def __snake_case ( self : Optional[Any] ): '''simple docstring''' lowercase :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ ) def __snake_case ( self : Union[str, Any] ): '''simple docstring''' lowercase :Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ ) def __snake_case ( self : Optional[int] ): '''simple docstring''' lowercase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ ) def __snake_case ( self : List[str] ): '''simple docstring''' lowercase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ ) def __snake_case ( self : List[str] ): '''simple docstring''' lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ ) def __snake_case ( self : Dict ): '''simple docstring''' lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ ) @slow def __snake_case ( self : int ): '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: lowercase :List[str] = TFMobileBertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_tf class __magic_name__ ( unittest.TestCase ): @slow def __snake_case ( self : Tuple ): '''simple docstring''' lowercase :int = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) lowercase :Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowercase :List[Any] = model(snake_case__ )[0] lowercase :Union[str, Any] = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , snake_case__ ) lowercase :Optional[int] = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
677
1
"""simple docstring""" def lowerCamelCase (a_ :Tuple , a_ :int , a_ :Tuple , a_ :List[Any]) -> str: if height >= 1: move_tower(height - 1 , a_ , a_ , a_) move_disk(a_ , a_) move_tower(height - 1 , a_ , a_ , a_) def lowerCamelCase (a_ :int , a_ :Union[str, Any]) -> str: print('''moving disk from''' , a_ , '''to''' , a_) def lowerCamelCase () -> Tuple: lowercase :int = int(input('''Height of hanoi: ''').strip()) move_tower(a_ , '''A''' , '''B''' , '''C''') if __name__ == "__main__": main()
677
"""simple docstring""" import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def lowerCamelCase (a_ :int) -> List[str]: random.seed(a_) np.random.seed(a_) torch.manual_seed(a_) torch.cuda.manual_seed_all(a_) # ^^ safe to call this function even if cuda is not available class __magic_name__ : def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ): '''simple docstring''' if isinstance(snake_case__ , torch.nn.Module ): lowercase :int = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , ) lowercase :Dict = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowercase :Optional[Any] = True if kwargs.get('''max_value''' , snake_case__ ) is not None: lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) lowercase :Optional[int] = kwargs['''max_value'''] if kwargs.get('''min_value''' , snake_case__ ) is not None: lowercase :List[Any] = '''The `min_value` argument is deprecated. 
Please use `min_decay` instead.''' deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) lowercase :str = kwargs['''min_value'''] lowercase :Any = list(snake_case__ ) lowercase :Optional[Any] = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , snake_case__ ) is not None: lowercase :str = '''The `device` argument is deprecated. Please use `to` instead.''' deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) self.to(device=kwargs['''device'''] ) lowercase :int = None lowercase :int = decay lowercase :Union[str, Any] = min_decay lowercase :List[Any] = update_after_step lowercase :Union[str, Any] = use_ema_warmup lowercase :Any = inv_gamma lowercase :Any = power lowercase :str = 0 lowercase :int = None # set in `step()` lowercase :List[str] = model_cls lowercase :Any = model_config @classmethod def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase , lowercase :int = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ ) lowercase :List[Any] = model_cls.from_pretrained(snake_case__ ) lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config ) ema_model.load_state_dict(snake_case__ ) return ema_model def __snake_case ( self : int , snake_case__ : Union[str, Any] ): '''simple docstring''' if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowercase :Dict = self.model_cls.from_config(self.model_config ) lowercase :Tuple = self.state_dict() state_dict.pop('''shadow_params''' , snake_case__ ) model.register_to_config(**snake_case__ ) self.copy_to(model.parameters() ) model.save_pretrained(snake_case__ ) def __snake_case ( self : int , snake_case__ : int ): 
'''simple docstring''' lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power else: lowercase :Dict = (1 + step) / (1_0 + step) lowercase :Optional[int] = min(snake_case__ , self.decay ) # make sure decay is not smaller than min_decay lowercase :Optional[int] = max(snake_case__ , self.min_decay ) return cur_decay_value @torch.no_grad() def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' if isinstance(snake_case__ , torch.nn.Module ): lowercase :Tuple = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , ) lowercase :Union[str, Any] = parameters.parameters() lowercase :Optional[Any] = list(snake_case__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
lowercase :List[Any] = self.get_decay(self.optimization_step ) lowercase :Optional[Any] = decay lowercase :List[Any] = 1 - decay lowercase :List[str] = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , snake_case__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(snake_case__ ) def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' lowercase :Optional[Any] = list(snake_case__ ) for s_param, param in zip(self.shadow_params , snake_case__ ): param.data.copy_(s_param.to(param.device ).data ) def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ): '''simple docstring''' lowercase :str = [ p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ ) for p in self.shadow_params ] def __snake_case ( self : Dict ): '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' lowercase :str = [param.detach().cpu().clone() for param in parameters] def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in 
zip(self.temp_stored_params , snake_case__ ): param.data.copy_(c_param.data ) # Better memory-wise. lowercase :Dict = None def __snake_case ( self : Union[str, Any] , snake_case__ : dict ): '''simple docstring''' lowercase :List[str] = copy.deepcopy(snake_case__ ) lowercase :Any = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowercase :int = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , snake_case__ ): raise ValueError('''Invalid min_decay''' ) lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , snake_case__ ): raise ValueError('''Invalid optimization_step''' ) lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , snake_case__ ): raise ValueError('''Invalid update_after_step''' ) lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , snake_case__ ): raise ValueError('''Invalid use_ema_warmup''' ) lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowercase :Dict = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ ) if shadow_params is not None: lowercase :List[Any] = shadow_params if not isinstance(self.shadow_params , snake_case__ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
677
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class __magic_name__ ( __UpperCAmelCase ): __A : Optional[Any] = "pix2struct_text_model" __A : Tuple = ["past_key_values"] __A : str = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Dict , snake_case__ : str=5_0_2_4_4 , snake_case__ : Dict=7_6_8 , snake_case__ : Union[str, Any]=6_4 , snake_case__ : Dict=2_0_4_8 , snake_case__ : Any=1_2 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : int=3_2 , snake_case__ : Tuple=1_2_8 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Tuple=1e-6 , snake_case__ : List[str]=1.0 , snake_case__ : Any="gelu_new" , snake_case__ : str=0 , snake_case__ : List[Any]=False , snake_case__ : Any=0 , snake_case__ : Optional[int]=1 , snake_case__ : Any=False , snake_case__ : Dict=True , **snake_case__ : Optional[int] , ): '''simple docstring''' lowercase :Optional[Any] = vocab_size lowercase :Optional[int] = hidden_size lowercase :Optional[int] = d_kv lowercase :Union[str, Any] = d_ff lowercase :Optional[int] = num_layers lowercase :List[str] = num_heads lowercase :Optional[int] = relative_attention_num_buckets lowercase :Tuple = relative_attention_max_distance lowercase :Any = dropout_rate lowercase :Union[str, Any] = layer_norm_epsilon lowercase :List[str] = initializer_factor lowercase :int = use_cache lowercase :int = eos_token_id lowercase :List[Any] = decoder_start_token_id # for backwards compatibility lowercase :List[Any] = dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , 
**snake_case__ , ) @classmethod def __snake_case ( cls : List[str] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[Any] ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase , lowercase :List[Any] = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": lowercase :int = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class __magic_name__ ( __UpperCAmelCase ): __A : int = "pix2struct_vision_model" def __init__( self : str , snake_case__ : List[Any]=7_6_8 , snake_case__ : str=7_6_8 , snake_case__ : List[Any]=2_0_4_8 , snake_case__ : Optional[int]=6_4 , snake_case__ : str=1_2 , snake_case__ : str=1_2 , snake_case__ : Tuple="gelu_new" , snake_case__ : Any=1e-6 , snake_case__ : Optional[int]=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Any=1e-1_0 , snake_case__ : Union[str, Any]=1.0 , snake_case__ : List[Any]=4_0_9_6 , snake_case__ : int=3_2 , snake_case__ : Optional[Any]=1_2_8 , **snake_case__ : Dict , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Optional[Any] = hidden_size lowercase :Optional[Any] = patch_embed_hidden_size lowercase :Optional[Any] = d_ff lowercase :Any = dropout_rate lowercase :List[str] = num_hidden_layers lowercase :str = num_attention_heads lowercase :List[str] = initializer_range lowercase :int = initializer_factor lowercase :int = attention_dropout lowercase :int = layer_norm_eps lowercase :Union[str, Any] = dense_act_fn lowercase :Union[str, Any] = seq_len lowercase :Tuple = 
relative_attention_num_buckets lowercase :str = relative_attention_max_distance lowercase :Optional[Any] = d_kv @classmethod def __snake_case ( cls : int , snake_case__ : Union[str, os.PathLike] , **snake_case__ : List[Any] ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase , lowercase :int = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": lowercase :Dict = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class __magic_name__ ( __UpperCAmelCase ): __A : Optional[int] = "pix2struct" __A : Any = True def __init__( self : int , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : str=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False , snake_case__ : Union[str, Any]=True , **snake_case__ : str , ): '''simple docstring''' super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowercase :List[str] = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: lowercase :Dict = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) lowercase :List[str] = PixaStructTextConfig(**snake_case__ ) lowercase :str = PixaStructVisionConfig(**snake_case__ ) lowercase :List[str] = self.text_config.decoder_start_token_id lowercase :List[str] = self.text_config.pad_token_id lowercase :Union[str, Any] = self.text_config.eos_token_id lowercase :Union[str, Any] = initializer_factor lowercase :List[Any] = initializer_range lowercase :Union[str, Any] = self.initializer_range lowercase :List[str] = self.initializer_range lowercase :str = is_vqa @classmethod def __snake_case ( cls : int , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Optional[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def __snake_case ( self : Dict ): '''simple docstring''' lowercase :Union[str, Any] = copy.deepcopy(self.__dict__ ) lowercase :Union[str, Any] = self.text_config.to_dict() lowercase :List[Any] = self.vision_config.to_dict() lowercase :List[str] = self.__class__.model_type return output
677
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase (a_ :int , a_ :Union[str, Any] , a_ :List[Any]) -> List[str]: return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def lowerCamelCase (a_ :Optional[Any] , a_ :Optional[int] , a_ :str , a_ :Any="attention") -> Optional[int]: lowercase :Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :]) lowercase :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2]) lowercase :str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :]) lowercase :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2]) lowercase :int = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :]) lowercase :List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2]) lowercase :List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :]) lowercase :Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2]) return k, o, q, v def lowerCamelCase (a_ :Any , a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Union[str, Any]=False) -> List[Any]: if split_mlp_wi: lowercase :List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] lowercase :Optional[int] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] lowercase :Dict = (wi_a, wi_a) else: lowercase :Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] lowercase :Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def lowerCamelCase (a_ :Any , a_ :Optional[Any] , a_ :Optional[Any] , a_ :Union[str, Any]) -> Optional[Any]: return 
params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def lowerCamelCase (a_ :dict , *, a_ :int , a_ :bool , a_ :bool = False) -> int: lowercase :Dict = traverse_util.flatten_dict(variables['''target''']) lowercase :Optional[Any] = {'''/'''.join(a_): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowercase :str = '''encoder/encoder/mlp/wi_0/kernel''' in old print('''Split MLP:''' , a_) lowercase :str = collections.OrderedDict() # Shared embeddings. lowercase :int = old['''token_embedder/embedding'''] # Encoder. for i in range(a_): # Block i, layer 0 (Self Attention). lowercase :Union[str, Any] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_attention_layer_norm''') lowercase , lowercase , lowercase , lowercase :Tuple = tax_attention_lookup(a_ , a_ , '''encoder''' , '''attention''') lowercase :Dict = layer_norm lowercase :Dict = k.T lowercase :Union[str, Any] = o.T lowercase :List[Any] = q.T lowercase :int = v.T # Block i, layer 1 (MLP). lowercase :Optional[int] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_mlp_layer_norm''') lowercase , lowercase :str = tax_mlp_lookup(a_ , a_ , '''encoder''' , a_) lowercase :int = layer_norm if split_mlp_wi: lowercase :Tuple = wi[0].T lowercase :Tuple = wi[1].T else: lowercase :int = wi.T lowercase :Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer lowercase :Dict = tax_relpos_bias_lookup( a_ , a_ , '''encoder''').T lowercase :str = old['''encoder/encoder_norm/scale'''] if not scalable_attention: lowercase :str = tax_relpos_bias_lookup( a_ , 0 , '''encoder''').T lowercase :List[Any] = tax_relpos_bias_lookup( a_ , 0 , '''decoder''').T if not is_encoder_only: # Decoder. for i in range(a_): # Block i, layer 0 (Self Attention). 
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_self_attention_layer_norm''') lowercase , lowercase , lowercase , lowercase :str = tax_attention_lookup(a_ , a_ , '''decoder''' , '''self_attention''') lowercase :List[str] = layer_norm lowercase :Dict = k.T lowercase :List[Any] = o.T lowercase :List[Any] = q.T lowercase :Any = v.T # Block i, layer 1 (Cross Attention). lowercase :Tuple = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_cross_attention_layer_norm''') lowercase , lowercase , lowercase , lowercase :int = tax_attention_lookup(a_ , a_ , '''decoder''' , '''encoder_decoder_attention''') lowercase :int = layer_norm lowercase :Dict = k.T lowercase :int = o.T lowercase :List[Any] = q.T lowercase :Tuple = v.T # Block i, layer 2 (MLP). lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_mlp_layer_norm''') lowercase , lowercase :Tuple = tax_mlp_lookup(a_ , a_ , '''decoder''' , a_) lowercase :Any = layer_norm if split_mlp_wi: lowercase :int = wi[0].T lowercase :Union[str, Any] = wi[1].T else: lowercase :int = wi.T lowercase :List[Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer lowercase :Union[str, Any] = tax_relpos_bias_lookup(a_ , a_ , '''decoder''').T lowercase :Union[str, Any] = old['''decoder/decoder_norm/scale'''] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowercase :int = old['''decoder/logits_dense/kernel'''].T return new def lowerCamelCase (a_ :Dict , a_ :bool) -> Tuple: lowercase :str = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowercase :Any = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowercase :Optional[Any] = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print('''Using shared word embeddings as lm_head.''') lowercase :Optional[int] = state_dict['''shared.weight'''] return state_dict def lowerCamelCase (a_ :List[str] , a_ :List[str] , a_ :Tuple , a_ :Optional[int] , a_ :List[str]) -> List[str]: lowercase :Optional[Any] = checkpoints.load_tax_checkpoint(a_) lowercase :Optional[int] = convert_tax_to_pytorch( a_ , num_layers=config.num_layers , is_encoder_only=a_ , scalable_attention=a_) lowercase :Union[str, Any] = make_state_dict(a_ , a_) model.load_state_dict(a_ , strict=a_) def lowerCamelCase (a_ :str , a_ :Optional[int] , a_ :Any , a_ :bool = False , a_ :bool = False , ) -> Tuple: lowercase :Optional[int] = MTaConfig.from_json_file(a_) print(F"""Building PyTorch model from configuration: {config}""") # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowercase :Union[str, Any] = UMTaEncoderModel(a_) else: lowercase :int = UMTaForConditionalGeneration(a_) # Load weights from tf checkpoint load_tax_weights_in_ta(a_ , a_ , a_ , a_ , a_) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""") model.save_pretrained(a_) # Verify that we can load the checkpoint. 
model.from_pretrained(a_) print('''Done''') if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
677
1
"""simple docstring""" from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowerCamelCase (a_ :Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: lowercase :Dict = [] lowercase :Tuple = [] lowercase :Union[str, Any] = [] for rt in rc.restypes: lowercase :List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) lowercase :int = {name: i for i, name in enumerate(a_)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) lowercase :Optional[Any] = torch.tensor( a_ , dtype=torch.intaa , device=protein['''aatype'''].device , ) lowercase :Any = torch.tensor( a_ , dtype=torch.intaa , device=protein['''aatype'''].device , ) lowercase :Union[str, Any] = torch.tensor( a_ , dtype=torch.floataa , device=protein['''aatype'''].device , ) lowercase :Optional[Any] = protein['''aatype'''].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein lowercase :Union[str, Any] = restype_atomaa_to_atomaa[protein_aatype] lowercase :str = restype_atomaa_mask[protein_aatype] lowercase :Union[str, Any] = residx_atomaa_mask lowercase :int = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowercase :List[Any] = restype_atomaa_to_atomaa[protein_aatype] lowercase :Optional[Any] = residx_atomaa_to_atomaa.long() # create the corresponding mask lowercase :Optional[int] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device) for restype, restype_letter in enumerate(rc.restypes): lowercase :Dict = rc.restype_atoa[restype_letter] lowercase :Any = rc.residue_atoms[restype_name] for atom_name in atom_names: lowercase :Dict = rc.atom_order[atom_name] lowercase :Union[str, Any] = 1 lowercase :Tuple = restype_atomaa_mask[protein_aatype] lowercase :Tuple = residx_atomaa_mask return protein def lowerCamelCase (a_ :Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]: lowercase :str = tree_map(lambda a_: torch.tensor(a_ , device=batch['''aatype'''].device) , a_ , np.ndarray) lowercase :int = tensor_tree_map(lambda a_: np.array(a_) , make_atomaa_masks(a_)) return out
677
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( 
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
677
1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __magic_name__ ( __UpperCAmelCase ): def __init__( self : List[Any] , snake_case__ : TransformeraDModel , snake_case__ : AutoencoderKL , snake_case__ : KarrasDiffusionSchedulers , snake_case__ : Optional[Dict[int, str]] = None , ): '''simple docstring''' super().__init__() self.register_modules(transformer=snake_case__ , vae=snake_case__ , scheduler=snake_case__ ) # create a imagenet -> id dictionary for easier use lowercase :str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''',''' ): lowercase :str = int(snake_case__ ) lowercase :Tuple = dict(sorted(self.labels.items() ) ) def __snake_case ( self : List[str] , snake_case__ : Union[str, List[str]] ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): lowercase :int = list(snake_case__ ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. 
Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : float = 4.0 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : int = 5_0 , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ): '''simple docstring''' lowercase :List[Any] = len(snake_case__ ) lowercase :Dict = self.transformer.config.sample_size lowercase :Optional[int] = self.transformer.config.in_channels lowercase :Tuple = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case__ , device=self.device , dtype=self.transformer.dtype , ) lowercase :Optional[int] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents lowercase :Dict = torch.tensor(snake_case__ , device=self.device ).reshape(-1 ) lowercase :List[str] = torch.tensor([1_0_0_0] * batch_size , device=self.device ) lowercase :str = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(snake_case__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: lowercase :Dict = latent_model_input[: len(snake_case__ ) // 2] lowercase :Optional[int] = torch.cat([half, half] , dim=0 ) lowercase :Union[str, Any] = self.scheduler.scale_model_input(snake_case__ , snake_case__ ) lowercase :Any = t if not torch.is_tensor(snake_case__ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) lowercase :int = latent_model_input.device.type == '''mps''' if isinstance(snake_case__ , snake_case__ ): lowercase :List[str] = torch.floataa if is_mps else torch.floataa else: lowercase :int = torch.intaa if is_mps else torch.intaa lowercase :Any = torch.tensor([timesteps] , dtype=snake_case__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: lowercase :Union[str, Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase :int = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output lowercase :Any = self.transformer( snake_case__ , timestep=snake_case__ , class_labels=snake_case__ ).sample # perform guidance if guidance_scale > 1: lowercase , lowercase :List[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] lowercase , lowercase :Tuple = torch.split(snake_case__ , len(snake_case__ ) // 2 , dim=0 ) lowercase :Any = uncond_eps + guidance_scale * (cond_eps - uncond_eps) lowercase :List[Any] = torch.cat([half_eps, half_eps] , dim=0 ) lowercase :Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: lowercase , lowercase :Tuple = torch.split(snake_case__ , snake_case__ , dim=1 ) else: lowercase :Any = noise_pred # compute previous image: x_t -> x_t-1 lowercase :Dict = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample if guidance_scale > 1: lowercase , lowercase :str = latent_model_input.chunk(2 , dim=0 ) else: lowercase :Optional[Any] = latent_model_input lowercase :List[str] = 1 / self.vae.config.scaling_factor * latents lowercase :int = self.vae.decode(snake_case__ ).sample lowercase :Dict = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is 
compatible with bfloat16 lowercase :Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase :List[Any] = self.numpy_to_pil(snake_case__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=snake_case__ )
677
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class __magic_name__ ( __UpperCAmelCase ): __A : Tuple = "donut-swin" __A : Optional[Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[str] , snake_case__ : Any=2_2_4 , snake_case__ : Tuple=4 , snake_case__ : str=3 , snake_case__ : Dict=9_6 , snake_case__ : Optional[Any]=[2, 2, 6, 2] , snake_case__ : Any=[3, 6, 1_2, 2_4] , snake_case__ : List[str]=7 , snake_case__ : Dict=4.0 , snake_case__ : str=True , snake_case__ : Optional[int]=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Any=0.1 , snake_case__ : List[str]="gelu" , snake_case__ : Tuple=False , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=1e-5 , **snake_case__ : Any , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Union[str, Any] = image_size lowercase :Optional[Any] = patch_size lowercase :List[str] = num_channels lowercase :Optional[int] = embed_dim lowercase :Optional[Any] = depths lowercase :List[Any] = len(snake_case__ ) lowercase :Optional[Any] = num_heads lowercase :int = window_size lowercase :str = mlp_ratio lowercase :Optional[int] = qkv_bias lowercase :Dict = hidden_dropout_prob lowercase :Any = attention_probs_dropout_prob lowercase :Any = drop_path_rate lowercase :int = hidden_act lowercase :int = use_absolute_embeddings lowercase :List[str] = layer_norm_eps lowercase :Union[str, Any] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase :str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
677
1
"""simple docstring""" # using dfs for finding eulerian path traversal def lowerCamelCase (a_ :int , a_ :Tuple , a_ :Union[str, Any] , a_ :Any=None) -> Dict: lowercase :Optional[Any] = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowercase , lowercase :Any = True, True lowercase :str = dfs(a_ , a_ , a_ , a_) return path def lowerCamelCase (a_ :Optional[Any] , a_ :Optional[Any]) -> Tuple: lowercase :Union[str, Any] = 0 lowercase :Dict = -1 for i in range(a_): if i not in graph.keys(): continue if len(graph[i]) % 2 == 1: odd_degree_nodes += 1 lowercase :Tuple = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def lowerCamelCase (a_ :Dict , a_ :Optional[Any]) -> Optional[int]: lowercase :Union[str, Any] = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)] lowercase , lowercase :int = check_circuit_or_path(a_ , a_) if check == 3: print('''graph is not Eulerian''') print('''no path''') return lowercase :int = 1 if check == 2: lowercase :Tuple = odd_node print('''graph has a Euler path''') if check == 1: print('''graph has a Euler cycle''') lowercase :Optional[Any] = dfs(a_ , a_ , a_) print(a_) def lowerCamelCase () -> Any: lowercase :int = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowercase :Tuple = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowercase :Union[str, Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowercase :int = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowercase :Tuple = { 1: [], 2: [] # all degree is zero } lowercase :int = 10 check_euler(a_ , a_) check_euler(a_ , a_) check_euler(a_ , a_) check_euler(a_ , a_) check_euler(a_ , a_) if __name__ == "__main__": main()
677
"""simple docstring""" import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def lowerCamelCase (a_ :Optional[int] , a_ :tuple , a_ :Path , a_ :str , a_ :int , a_ :List[Any] , a_ :Any , a_ :Union[str, Any]=False , ) -> Dict: output_path.parent.mkdir(parents=a_ , exist_ok=a_) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , use_external_data_format=a_ , enable_onnx_checker=a_ , opset_version=a_ , ) else: export( a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , opset_version=a_ , ) @torch.no_grad() def lowerCamelCase (a_ :str , a_ :str , a_ :int , a_ :bool = False) -> Union[str, Any]: lowercase :Any = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowercase :Union[str, Any] = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''') else: lowercase :List[str] = '''cpu''' lowercase :List[str] = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=a_).to(a_) lowercase :List[Any] = Path(a_) # TEXT ENCODER lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings lowercase :Dict = pipeline.text_encoder.config.hidden_size lowercase :Union[str, Any] = pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , ) onnx_export( 
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=a_ , ) del pipeline.text_encoder # UNET lowercase :Any = pipeline.unet.config.in_channels lowercase :List[Any] = pipeline.unet.config.sample_size lowercase :Optional[int] = output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_), torch.randn(2).to(device=a_ , dtype=a_), torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_), False, ) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''timestep''': {0: '''batch'''}, '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=a_ , use_external_data_format=a_ , ) lowercase :List[Any] = str(unet_path.absolute().as_posix()) lowercase :str = os.path.dirname(a_) lowercase :Optional[Any] = onnx.load(a_) # clean up existing tensor files shutil.rmtree(a_) os.mkdir(a_) # collate external tensor files into one onnx.save_model( a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , ) del pipeline.unet # VAE ENCODER lowercase :Tuple = pipeline.vae lowercase :Optional[Any] = vae_encoder.config.in_channels lowercase :Any = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowercase :Any = lambda a_ , a_: vae_encoder.encode(a_ , a_)[0].sample() onnx_export( a_ , model_args=( torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_), False, ) , output_path=output_path / '''vae_encoder''' / 
'''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=a_ , ) # VAE DECODER lowercase :Any = pipeline.vae lowercase :Dict = vae_decoder.config.latent_channels lowercase :Union[str, Any] = vae_decoder.config.out_channels # forward only through the decoder part lowercase :List[Any] = vae_encoder.decode onnx_export( a_ , model_args=( torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=a_ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowercase :Dict = pipeline.safety_checker lowercase :str = safety_checker.config.vision_config.num_channels lowercase :str = safety_checker.config.vision_config.image_size lowercase :List[str] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_), torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=a_ , ) del pipeline.safety_checker lowercase :Tuple = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''') lowercase :Optional[Any] = pipeline.feature_extractor else: lowercase :int = None lowercase :Union[str, Any] = None lowercase :Optional[int] = OnnxStableDiffusionPipeline( 
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(a_) print('''ONNX pipeline saved to''' , a_) del pipeline del onnx_pipeline lowercase :Tuple = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''') print('''ONNX pipeline is loadable''') if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=14, type=int, help='''The version of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') UpperCAmelCase = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
677
1
"""simple docstring""" class __magic_name__ : def __init__( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple ): '''simple docstring''' lowercase :Any = None lowercase :Dict = None lowercase :Optional[int] = graph self._normalize_graph(snake_case__ , snake_case__ ) lowercase :str = len(snake_case__ ) lowercase :Optional[Any] = None def __snake_case ( self : str , snake_case__ : Any , snake_case__ : str ): '''simple docstring''' if sources is int: lowercase :List[str] = [sources] if sinks is int: lowercase :str = [sinks] if len(snake_case__ ) == 0 or len(snake_case__ ) == 0: return lowercase :Optional[int] = sources[0] lowercase :List[str] = sinks[0] # make fake vertex if there are more # than one source or sink if len(snake_case__ ) > 1 or len(snake_case__ ) > 1: lowercase :int = 0 for i in sources: max_input_flow += sum(self.graph[i] ) lowercase :Any = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: lowercase :Union[str, Any] = max_input_flow lowercase :Optional[Any] = 0 lowercase :Union[str, Any] = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: lowercase :List[Any] = max_input_flow lowercase :List[str] = size - 1 def __snake_case ( self : Dict ): '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __snake_case ( self : Tuple , snake_case__ : str ): '''simple docstring''' lowercase :List[str] = algorithm(self ) class __magic_name__ : def __init__( self : str , snake_case__ : Dict ): '''simple docstring''' lowercase :int = flow_network lowercase :str = flow_network.verticesCount lowercase :Optional[int] = flow_network.sourceIndex lowercase :Dict = 
flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that lowercase :Union[str, Any] = flow_network.graph lowercase :str = False def __snake_case ( self : int ): '''simple docstring''' if not self.executed: self._algorithm() lowercase :Tuple = True def __snake_case ( self : str ): '''simple docstring''' pass class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' super().__init__(snake_case__ ) # use this to save your result lowercase :List[str] = -1 def __snake_case ( self : Optional[Any] ): '''simple docstring''' if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class __magic_name__ ( __UpperCAmelCase ): def __init__( self : List[str] , snake_case__ : Optional[Any] ): '''simple docstring''' super().__init__(snake_case__ ) lowercase :Union[str, Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] lowercase :Union[str, Any] = [0] * self.verticies_count lowercase :Optional[Any] = [0] * self.verticies_count def __snake_case ( self : Optional[Any] ): '''simple docstring''' lowercase :Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule lowercase :int = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list lowercase :int = 0 while i < len(snake_case__ ): lowercase :Any = vertices_list[i] lowercase :str = self.heights[vertex_index] self.process_vertex(snake_case__ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index 
vertices_list.insert(0 , vertices_list.pop(snake_case__ ) ) lowercase :str = 0 else: i += 1 lowercase :List[str] = sum(self.preflow[self.source_index] ) def __snake_case ( self : Optional[Any] , snake_case__ : Optional[int] ): '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(snake_case__ , snake_case__ ) self.relabel(snake_case__ ) def __snake_case ( self : List[Any] , snake_case__ : Any , snake_case__ : Any ): '''simple docstring''' lowercase :str = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __snake_case ( self : int , snake_case__ : str ): '''simple docstring''' lowercase :Union[str, Any] = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): lowercase :str = self.heights[to_index] if min_height is not None: lowercase :Optional[Any] = min_height + 1 if __name__ == "__main__": UpperCAmelCase = [0] UpperCAmelCase = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCAmelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCAmelCase = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCAmelCase = flow_network.find_maximum_flow() print(F"""maximum flow is 
{maximum_flow}""")
677
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase (a_ :List[Any] , a_ :Union[str, Any] , a_ :Tuple , a_ :List[str] , a_ :str=True , a_ :str="pt") -> List[str]: lowercase :Optional[int] = {'''add_prefix_space''': True} if isinstance(a_ , a_) and not line.startswith(''' ''') else {} lowercase :Optional[int] = padding_side return tokenizer( [line] , max_length=a_ , padding='''max_length''' if pad_to_max_length else None , truncation=a_ , return_tensors=a_ , add_special_tokens=a_ , **a_ , ) def lowerCamelCase (a_ :str , a_ :Tuple , a_ :Optional[Any]=None , ) -> Tuple: lowercase :Optional[Any] = input_ids.ne(a_).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str="train" , snake_case__ : Optional[Any]=None , snake_case__ : Tuple=None , snake_case__ : Any=None , snake_case__ : Dict="" , ): '''simple docstring''' super().__init__() lowercase :Tuple = Path(snake_case__ ).joinpath(type_path + '''.source''' ) lowercase :Union[str, Any] = Path(snake_case__ ).joinpath(type_path + '''.target''' ) lowercase :List[Any] = self.get_char_lens(self.src_file ) lowercase :Tuple = max_source_length lowercase :Optional[int] = max_target_length assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}""" lowercase :Any = tokenizer lowercase :Tuple = prefix if n_obs is not None: lowercase 
:List[str] = self.src_lens[:n_obs] lowercase :List[Any] = src_lang lowercase :str = tgt_lang def __len__( self : Any ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : str , snake_case__ : Any ): '''simple docstring''' lowercase :Optional[int] = index + 1 # linecache starts at 1 lowercase :Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip('''\n''' ) lowercase :Dict = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip('''\n''' ) assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , snake_case__ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowercase :Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer ) lowercase :Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer lowercase :Optional[int] = encode_line(snake_case__ , snake_case__ , self.max_source_length , '''right''' ) lowercase :Tuple = encode_line(snake_case__ , snake_case__ , self.max_target_length , '''right''' ) lowercase :List[str] = source_inputs['''input_ids'''].squeeze() lowercase :Optional[Any] = target_inputs['''input_ids'''].squeeze() lowercase :List[str] = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def __snake_case ( snake_case__ : Optional[int] ): '''simple docstring''' return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()] def __snake_case ( self : Tuple , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :Optional[Any] = torch.stack([x['''input_ids'''] for x in batch] ) lowercase :Tuple = torch.stack([x['''attention_mask'''] for x in batch] ) lowercase :Tuple 
= torch.stack([x['''decoder_input_ids'''] for x in batch] ) lowercase :str = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer.pad_token_id ) lowercase :Optional[int] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer.pad_token_id ) lowercase :List[Any] = trim_batch(snake_case__ , snake_case__ ) lowercase , lowercase :List[str] = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ ) lowercase :Optional[int] = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch UpperCAmelCase = getLogger(__name__) def lowerCamelCase (a_ :List[List]) -> Tuple: return list(itertools.chain.from_iterable(a_)) def lowerCamelCase (a_ :str) -> None: lowercase :List[str] = get_git_info() save_json(a_ , os.path.join(a_ , '''git_log.json''')) def lowerCamelCase (a_ :Optional[int] , a_ :Optional[int] , a_ :Optional[Any]=4 , **a_ :Optional[Any]) -> str: with open(a_ , '''w''') as f: json.dump(a_ , a_ , indent=a_ , **a_) def lowerCamelCase (a_ :Dict) -> Union[str, Any]: with open(a_) as f: return json.load(a_) def lowerCamelCase () -> List[str]: lowercase :Dict = git.Repo(search_parent_directories=a_) lowercase :int = { '''repo_id''': str(a_), '''repo_sha''': str(repo.head.object.hexsha), '''repo_branch''': str(repo.active_branch), '''hostname''': str(socket.gethostname()), } return repo_infos def lowerCamelCase (a_ :Callable , a_ :Iterable) -> List: return list(map(a_ , a_)) def lowerCamelCase (a_ :Optional[Any] , a_ :str) -> Any: with open(a_ , '''wb''') as f: return pickle.dump(a_ , a_) def lowerCamelCase (a_ :List[str]) -> List[str]: def remove_articles(a_ :Union[str, Any]): return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , a_) def white_space_fix(a_ :Tuple): return " ".join(text.split()) def remove_punc(a_ :int): lowercase :List[Any] = set(string.punctuation) return "".join(ch for ch in text if ch not 
in exclude) def lower(a_ :int): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a_)))) def lowerCamelCase (a_ :List[str] , a_ :Any) -> List[str]: lowercase :Dict = normalize_answer(a_).split() lowercase :int = normalize_answer(a_).split() lowercase :List[Any] = Counter(a_) & Counter(a_) lowercase :Optional[int] = sum(common.values()) if num_same == 0: return 0 lowercase :str = 1.0 * num_same / len(a_) lowercase :Tuple = 1.0 * num_same / len(a_) lowercase :Tuple = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase (a_ :Tuple , a_ :Optional[Any]) -> List[Any]: return normalize_answer(a_) == normalize_answer(a_) def lowerCamelCase (a_ :List[str] , a_ :List[str]) -> Dict: assert len(a_) == len(a_) lowercase :Any = 0 for hypo, pred in zip(a_ , a_): em += exact_match_score(a_ , a_) if len(a_) > 0: em /= len(a_) return {"em": em} def lowerCamelCase (a_ :Union[str, Any]) -> Optional[Any]: return model_prefix.startswith('''rag''') def lowerCamelCase (a_ :List[str] , a_ :Tuple , a_ :List[str]) -> Any: lowercase :List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowercase :str = '''dropout_rate''' for p in extra_params: if getattr(a_ , a_ , a_): if not hasattr(a_ , a_) and not hasattr(a_ , equivalent_param[p]): logger.info('''config doesn\'t have a `{}` attribute'''.format(a_)) delattr(a_ , a_) continue lowercase :List[str] = p if hasattr(a_ , a_) else equivalent_param[p] setattr(a_ , a_ , getattr(a_ , a_)) delattr(a_ , a_) return hparams, config
677
1
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowerCamelCase (a_ :List[str]) -> Any: return EnvironmentCommand() def lowerCamelCase (a_ :Dict) -> Optional[Any]: return EnvironmentCommand(args.accelerate_config_file) class __magic_name__ ( __UpperCAmelCase ): @staticmethod def __snake_case ( snake_case__ : ArgumentParser ): '''simple docstring''' lowercase :Optional[int] = parser.add_parser('''env''' ) download_parser.set_defaults(func=snake_case__ ) download_parser.add_argument( '''--accelerate-config_file''' , default=snake_case__ , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=snake_case__ ) def __init__( self : Any , snake_case__ : List[str] , *snake_case__ : Dict ): '''simple docstring''' lowercase :Optional[Any] = accelerate_config_file def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase :Optional[Any] = '''not installed''' if is_safetensors_available(): import safetensors lowercase :List[Any] = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors lowercase :Dict = f"""{safetensors.__version__} but is ignored because of PyTorch version too old.""" lowercase :Any = '''not installed''' lowercase :int = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file lowercase :List[str] = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(snake_case__ ): lowercase :str = load_config_from_file(self._accelerate_config_file ).to_dict() lowercase :Any = ( '''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(snake_case__ , snake_case__ ) else f"""\t{accelerate_config}""" ) lowercase :Dict = '''not installed''' lowercase :Optional[Any] = '''NA''' if is_torch_available(): import torch lowercase :Optional[int] = torch.__version__ lowercase :Optional[int] = torch.cuda.is_available() lowercase :List[str] = '''not installed''' lowercase :Dict = '''NA''' if is_tf_available(): import tensorflow as tf lowercase :Tuple = tf.__version__ try: # deprecated in v2.1 lowercase :List[Any] = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool lowercase :Tuple = bool(tf.config.list_physical_devices('''GPU''' ) ) lowercase :Dict = '''not installed''' lowercase :int = '''not installed''' lowercase :Dict = '''not installed''' lowercase :Tuple = '''NA''' if is_flax_available(): import flax import jax import jaxlib lowercase :Tuple = flax.__version__ lowercase :str = jax.__version__ lowercase :Optional[Any] = jaxlib.__version__ lowercase :Tuple = jax.lib.xla_bridge.get_backend().platform lowercase :Optional[int] = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': f"""{safetensors_version}""", '''Accelerate version''': f"""{accelerate_version}""", '''Accelerate config''': f"""{accelerate_config_str}""", '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""", '''Tensorflow version (GPU?)''': f"""{tf_version} ({tf_cuda_available})""", '''Flax version (CPU?/GPU?/TPU?)''': f"""{flax_version} ({jax_backend})""", '''Jax version''': f"""{jax_version}""", '''JaxLib version''': f"""{jaxlib_version}""", '''Using 
GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(snake_case__ ) ) return info @staticmethod def __snake_case ( snake_case__ : Optional[int] ): '''simple docstring''' return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
677
"""simple docstring""" def lowerCamelCase (a_ :Tuple , a_ :int , a_ :Tuple , a_ :List[Any]) -> str: if height >= 1: move_tower(height - 1 , a_ , a_ , a_) move_disk(a_ , a_) move_tower(height - 1 , a_ , a_ , a_) def lowerCamelCase (a_ :int , a_ :Union[str, Any]) -> str: print('''moving disk from''' , a_ , '''to''' , a_) def lowerCamelCase () -> Tuple: lowercase :int = int(input('''Height of hanoi: ''').strip()) move_tower(a_ , '''A''' , '''B''' , '''C''') if __name__ == "__main__": main()
677
1
"""simple docstring""" import warnings warnings.warn( '''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: ''' '''`from accelerate import find_executable_batch_size` to avoid this warning.''', FutureWarning, )
677
"""simple docstring""" from sklearn.metrics import mean_squared_error import datasets UpperCAmelCase = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' UpperCAmelCase = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' UpperCAmelCase = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. 
Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. ])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def __snake_case ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def __snake_case ( self : Optional[Any] ): '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def __snake_case ( self : List[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : str=None , snake_case__ : List[Any]="uniform_average" , snake_case__ : Dict=True ): '''simple docstring''' lowercase :Dict = 
mean_squared_error( snake_case__ , snake_case__ , sample_weight=snake_case__ , multioutput=snake_case__ , squared=snake_case__ ) return {"mse": mse}
677
1
"""simple docstring""" UpperCAmelCase = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' UpperCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] UpperCAmelCase = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
677
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class __magic_name__ ( __UpperCAmelCase ): @staticmethod @abstractmethod def __snake_case ( snake_case__ : ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def __snake_case ( self : Optional[Any] ): '''simple docstring''' raise NotImplementedError()
677
1
"""Data and metric utilities for seq2seq / RAG fine-tuning examples.

NOTE(review): this file has been machine-obfuscated — every parameter was
renamed to ``a_``/``snake_case__`` and every local variable to ``lowercase``.
Several signatures therefore declare *duplicate* parameter names (a
SyntaxError in Python), and bodies still reference the original
pre-obfuscation names (``tokenizer``, ``line``, ``batch``, ...).  The
docstrings below describe the behavior recoverable from those body
references; confirm against the upstream example source before relying on
them.
"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer


def lowerCamelCase(a_: List[Any], a_: Union[str, Any], a_: Tuple, a_: List[str], a_: str = True, a_: str = "pt") -> List[str]:
    """Tokenize a single text line to a padded/truncated tensor batch.

    NOTE(review): intended signature (from body references) is roughly
    ``encode_line(tokenizer, line, max_length, padding_side,
    pad_to_max_length=True, return_tensors="pt")`` — TODO confirm upstream.
    """
    # GPT2-style tokenizers need add_prefix_space for lines not starting with a space.
    lowercase: Optional[int] = {'''add_prefix_space''': True} if isinstance(a_, a_) and not line.startswith(''' ''') else {}
    lowercase: Optional[int] = padding_side
    return tokenizer(
        [line],
        max_length=a_,
        padding='''max_length''' if pad_to_max_length else None,
        truncation=a_,
        return_tensors=a_,
        add_special_tokens=a_,
        **a_,
    )


def lowerCamelCase(a_: str, a_: Tuple, a_: Optional[Any] = None, ) -> Tuple:
    """Drop columns that are pad-only in every row of the batch (trim_batch)."""
    # keep_column_mask marks columns containing at least one non-pad token.
    lowercase: Optional[Any] = input_ids.ne(a_).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class __magic_name__(__UpperCAmelCase):
    """Line-aligned ``.source``/``.target`` file dataset for seq2seq training.

    NOTE(review): base class name was obfuscated to the undefined
    ``__UpperCAmelCase`` — upstream this subclasses ``torch.utils.data.Dataset``.
    """

    def __init__(self: Union[str, Any], snake_case__: Union[str, Any], snake_case__: Optional[Any], snake_case__: Optional[Any], snake_case__: Optional[Any], snake_case__: str = "train", snake_case__: Optional[Any] = None, snake_case__: Tuple = None, snake_case__: Any = None, snake_case__: Dict = "", ):
        """Index ``<data_dir>/<type_path>.source`` and ``.target`` line files."""
        super().__init__()
        lowercase: Tuple = Path(snake_case__).joinpath(type_path + '''.source''')
        lowercase: Union[str, Any] = Path(snake_case__).joinpath(type_path + '''.target''')
        lowercase: List[Any] = self.get_char_lens(self.src_file)
        lowercase: Tuple = max_source_length
        lowercase: Optional[int] = max_target_length
        assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
        lowercase: Any = tokenizer
        lowercase: Tuple = prefix
        if n_obs is not None:
            # Optionally cap the dataset to the first n_obs examples.
            lowercase: List[str] = self.src_lens[:n_obs]
        lowercase: List[Any] = src_lang
        lowercase: str = tgt_lang

    def __len__(self: Any):
        """Number of source lines indexed."""
        return len(self.src_lens)

    def __getitem__(self: str, snake_case__: Any):
        """Read one source/target line pair and encode both sides."""
        lowercase: Optional[int] = index + 1  # linecache starts at 1
        lowercase: Optional[Any] = self.prefix + linecache.getline(str(self.src_file), snake_case__).rstrip('''\n''')
        lowercase: Dict = linecache.getline(str(self.tgt_file), snake_case__).rstrip('''\n''')
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, snake_case__):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        lowercase: Dict = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, snake_case__) else self.tokenizer
        )
        lowercase: Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer, snake_case__) else self.tokenizer
        lowercase: Optional[int] = encode_line(snake_case__, snake_case__, self.max_source_length, '''right''')
        lowercase: Tuple = encode_line(snake_case__, snake_case__, self.max_target_length, '''right''')
        lowercase: List[str] = source_inputs['''input_ids'''].squeeze()
        lowercase: Optional[Any] = target_inputs['''input_ids'''].squeeze()
        lowercase: List[str] = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def __snake_case(snake_case__: Optional[int]):
        """Per-line character lengths of a text file (intended: get_char_lens)."""
        return [len(snake_case__) for x in Path(snake_case__).open().readlines()]

    def __snake_case(self: Tuple, snake_case__: Union[str, Any]):
        """Collate a list of examples into one padded, trimmed tensor batch."""
        lowercase: Optional[Any] = torch.stack([x['''input_ids'''] for x in batch])
        lowercase: Tuple = torch.stack([x['''attention_mask'''] for x in batch])
        lowercase: Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch])
        # RagTokenizer exposes separate generator / question_encoder pad ids.
        lowercase: str = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, snake_case__)
            else self.tokenizer.pad_token_id
        )
        lowercase: Optional[int] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, snake_case__)
            else self.tokenizer.pad_token_id
        )
        lowercase: List[Any] = trim_batch(snake_case__, snake_case__)
        lowercase, lowercase: List[str] = trim_batch(snake_case__, snake_case__, attention_mask=snake_case__)
        lowercase: Optional[int] = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch


UpperCAmelCase = getLogger(__name__)  # module-level logger (intended name: logger)


def lowerCamelCase(a_: List[List]) -> Tuple:
    """Flatten one level of nesting (intended: flatten_list)."""
    return list(itertools.chain.from_iterable(a_))


def lowerCamelCase(a_: str) -> None:
    """Dump current git repo metadata to ``<folder>/git_log.json``."""
    lowercase: List[str] = get_git_info()
    save_json(a_, os.path.join(a_, '''git_log.json'''))


def lowerCamelCase(a_: Optional[int], a_: Optional[int], a_: Optional[Any] = 4, **a_: Optional[Any]) -> str:
    """Serialize an object to a JSON file (intended: save_json)."""
    with open(a_, '''w''') as f:
        json.dump(a_, a_, indent=a_, **a_)


def lowerCamelCase(a_: Dict) -> Union[str, Any]:
    """Load and return the contents of a JSON file (intended: load_json)."""
    with open(a_) as f:
        return json.load(a_)


def lowerCamelCase() -> List[str]:
    """Collect repo id/sha/branch and hostname (intended: get_git_info)."""
    lowercase: Dict = git.Repo(search_parent_directories=a_)
    lowercase: int = {
        '''repo_id''': str(a_),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
        '''hostname''': str(socket.gethostname()),
    }
    return repo_infos


def lowerCamelCase(a_: Callable, a_: Iterable) -> List:
    """Eager ``map`` returning a list (intended: lmap)."""
    return list(map(a_, a_))


def lowerCamelCase(a_: Optional[Any], a_: str) -> Any:
    """Pickle an object to a file path (intended: pickle_save)."""
    with open(a_, '''wb''') as f:
        return pickle.dump(a_, a_)


def lowerCamelCase(a_: List[str]) -> List[str]:
    """SQuAD-style answer normalization: lowercase, strip punctuation and
    articles, collapse whitespace (intended: normalize_answer)."""

    def remove_articles(a_: Union[str, Any]):
        return re.sub(R'''\b(a|an|the)\b''', ''' ''', a_)

    def white_space_fix(a_: Tuple):
        return " ".join(text.split())

    def remove_punc(a_: int):
        lowercase: List[Any] = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(a_: int):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(a_))))


def lowerCamelCase(a_: List[str], a_: Any) -> List[str]:
    """Token-level F1 between a prediction and a gold answer (intended: f1_score)."""
    lowercase: Dict = normalize_answer(a_).split()
    lowercase: int = normalize_answer(a_).split()
    # Multiset intersection counts shared tokens with multiplicity.
    lowercase: List[Any] = Counter(a_) & Counter(a_)
    lowercase: Optional[int] = sum(common.values())
    if num_same == 0:
        return 0
    lowercase: str = 1.0 * num_same / len(a_)
    lowercase: Tuple = 1.0 * num_same / len(a_)
    lowercase: Tuple = (2 * precision * recall) / (precision + recall)
    return fa


def lowerCamelCase(a_: Tuple, a_: Optional[Any]) -> List[Any]:
    """Exact-match after normalization (intended: exact_match_score)."""
    return normalize_answer(a_) == normalize_answer(a_)


def lowerCamelCase(a_: List[str], a_: List[str]) -> Dict:
    """Mean exact-match over aligned prediction/reference lists."""
    assert len(a_) == len(a_)
    lowercase: Any = 0
    for hypo, pred in zip(a_, a_):
        em += exact_match_score(a_, a_)
    if len(a_) > 0:
        em /= len(a_)
    return {"em": em}


def lowerCamelCase(a_: Union[str, Any]) -> Optional[Any]:
    """True when the model prefix denotes a RAG model (intended: is_rag_model)."""
    return model_prefix.startswith('''rag''')


def lowerCamelCase(a_: List[str], a_: Tuple, a_: List[str]) -> Any:
    """Move extra hparams onto the model config, dropping unsupported ones
    (intended: set_extra_model_params)."""
    lowercase: List[str] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    lowercase: str = '''dropout_rate'''
    for p in extra_params:
        if getattr(a_, a_, a_):
            if not hasattr(a_, a_) and not hasattr(a_, equivalent_param[p]):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(a_))
                delattr(a_, a_)
                continue
            lowercase: List[str] = p if hasattr(a_, a_) else equivalent_param[p]
            setattr(a_, a_, getattr(a_, a_))
            delattr(a_, a_)
    return hparams, config
677
"""Lazy-import ``__init__`` for the EnCodec model (config, feature extractor,
and — when torch is available — the modeling classes)."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Fix: the obfuscated original stored both the import map and the modeling
# list in the same throw-away name (`UpperCAmelCase`, the second assignment
# clobbering the first) and then passed the *undefined* name
# `_import_structure` to `_LazyModule`, raising NameError at import time.
# The standard transformers lazy-init layout is restored below.
_import_structure = {
    '''configuration_encodec''': [
        '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''EncodecConfig''',
    ],
    '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it, the modeling symbols simply are not exported.
    pass
else:
    _import_structure['''modeling_encodec'''] = [
        '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''EncodecModel''',
        '''EncodecPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
677
1
"""CLAP audio feature extractor: waveform -> (possibly fused) log-mel features.

NOTE(review): this file has been machine-obfuscated — parameters were renamed
to ``snake_case__`` (several signatures declare *duplicate* parameter names,
a SyntaxError in Python) and locals to ``lowercase``, while bodies still
reference the original names (``truncation``, ``waveform``, ``mel``, ...).
``np.floataa`` is presumably the mangled ``np.float32`` — confirm upstream.
"""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging

UpperCAmelCase = logging.get_logger(__name__)  # module logger (intended name: logger)


class __magic_name__(__UpperCAmelCase):
    """CLAP feature extractor.

    NOTE(review): the base class was obfuscated to the undefined
    ``__UpperCAmelCase`` — upstream this subclasses
    ``SequenceFeatureExtractor`` (imported above).
    """

    # Intended attribute name upstream: model_input_names.
    __A: Optional[int] = ["input_features", "is_longer"]

    def __init__(self: Tuple, snake_case__: Optional[Any] = 6_4, snake_case__: int = 4_8_0_0_0, snake_case__: Union[str, Any] = 4_8_0, snake_case__: Optional[int] = 1_0, snake_case__: Optional[int] = 1_0_2_4, snake_case__: str = 0.0, snake_case__: Optional[int] = False, snake_case__: float = 0, snake_case__: float = 1_4_0_0_0, snake_case__: int = None, snake_case__: str = "fusion", snake_case__: str = "repeatpad", **snake_case__: str, ):
        """Build two mel filter banks (htk for fusion, slaney otherwise) and
        cache sampling/窗 parameters read by the other methods."""
        super().__init__(
            feature_size=snake_case__,
            sampling_rate=snake_case__,
            padding_value=snake_case__,
            return_attention_mask=snake_case__,
            **snake_case__,
        )
        lowercase: Optional[int] = top_db
        lowercase: Any = truncation
        lowercase: Optional[Any] = padding
        lowercase: Any = fft_window_size
        # Number of distinct frequency bins for a real FFT of this window size.
        lowercase: Optional[Any] = (fft_window_size >> 1) + 1
        lowercase: int = hop_length
        lowercase: Any = max_length_s
        lowercase: str = max_length_s * sampling_rate
        lowercase: str = sampling_rate
        lowercase: Dict = frequency_min
        lowercase: List[str] = frequency_max
        lowercase: Union[str, Any] = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=snake_case__,
            min_frequency=snake_case__,
            max_frequency=snake_case__,
            sampling_rate=snake_case__,
            norm=snake_case__,
            mel_scale='''htk''',
        )
        lowercase: List[Any] = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=snake_case__,
            min_frequency=snake_case__,
            max_frequency=snake_case__,
            sampling_rate=snake_case__,
            norm='''slaney''',
            mel_scale='''slaney''',
        )

    def __snake_case(self: Tuple):
        """Serializable dict of this extractor, minus the bulky filter banks
        (intended: to_dict)."""
        lowercase: List[Any] = copy.deepcopy(self.__dict__)
        lowercase: Optional[Any] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def __snake_case(self: List[Any], snake_case__: np.array, snake_case__: Optional[np.array] = None):
        """Log-mel (dB) spectrogram of a waveform, transposed to time-major
        (intended: _np_extract_fbank_features)."""
        lowercase: Tuple = spectrogram(
            snake_case__,
            window_function(self.fft_window_size, '''hann'''),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=snake_case__,
            log_mel='''dB''',
        )
        return log_mel_spectrogram.T

    def __snake_case(self: Tuple, snake_case__: Union[str, Any], snake_case__: int, snake_case__: List[str]):
        """Stack a shrunk copy of the full mel with three random chunks
        (front/middle/back) into a 4-channel fusion tensor
        (intended: _random_mel_fusion)."""
        lowercase: Dict = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            lowercase: List[str] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            lowercase: List[str] = [0]
        # randomly choose index for each part
        lowercase: str = np.random.choice(ranges[0])
        lowercase: Any = np.random.choice(ranges[1])
        lowercase: List[str] = np.random.choice(ranges[2])
        lowercase: Any = mel[idx_front : idx_front + chunk_frames, :]
        lowercase: Dict = mel[idx_middle : idx_middle + chunk_frames, :]
        lowercase: Optional[int] = mel[idx_back : idx_back + chunk_frames, :]
        lowercase: Tuple = torch.tensor(mel[None, None, :])
        lowercase: Union[str, Any] = torch.nn.functional.interpolate(
            snake_case__, size=[chunk_frames, 6_4], mode='''bilinear''', align_corners=snake_case__
        )
        lowercase: Union[str, Any] = mel_shrink[0][0].numpy()
        lowercase: List[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def __snake_case(self: Dict, snake_case__: np.array, snake_case__: List[Any], snake_case__: List[str], snake_case__: str):
        """Truncate or pad one waveform to max_length and extract its mel
        features; returns (features, is_longer) (intended: _get_input_mel)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                lowercase: List[str] = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                lowercase: str = len(snake_case__) - max_length
                lowercase: Optional[Any] = np.random.randint(0, overflow + 1)
                lowercase: List[str] = waveform[idx : idx + max_length]
                lowercase: Union[str, Any] = self._np_extract_fbank_features(snake_case__, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                lowercase: int = self._np_extract_fbank_features(snake_case__, self.mel_filters)
                lowercase: List[str] = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                lowercase: Optional[Any] = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    lowercase: List[str] = np.stack([mel, mel, mel, mel], axis=0)
                    lowercase: Dict = False
                else:
                    lowercase: Union[str, Any] = self._random_mel_fusion(snake_case__, snake_case__, snake_case__)
                    lowercase: Dict = True
            else:
                raise NotImplementedError(f"""data_truncating {truncation} not implemented""")
        else:
            lowercase: List[str] = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    lowercase: List[str] = int(max_length / len(snake_case__))
                    lowercase: Optional[Any] = np.stack(np.tile(snake_case__, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    lowercase: Optional[Any] = int(max_length / len(snake_case__))
                    lowercase: int = np.stack(np.tile(snake_case__, snake_case__))
                lowercase: Optional[int] = np.pad(snake_case__, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0)
            if truncation == "fusion":
                lowercase: str = self._np_extract_fbank_features(snake_case__, self.mel_filters)
                lowercase: Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                lowercase: Any = self._np_extract_fbank_features(snake_case__, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(self: List[str], snake_case__: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], snake_case__: str = None, snake_case__: Optional[str] = None, snake_case__: Optional[int] = None, snake_case__: Optional[int] = None, snake_case__: Optional[Union[str, TensorType]] = None, **snake_case__: Union[str, Any], ):
        """Featurize raw audio (single waveform or batch) into a BatchFeature
        with ``input_features`` and ``is_longer``."""
        lowercase: Tuple = truncation if truncation is not None else self.truncation
        lowercase: Tuple = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.'''
            )
        lowercase: Union[str, Any] = isinstance(snake_case__, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        lowercase: List[str] = is_batched_numpy or (
            isinstance(snake_case__, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            lowercase: List[Any] = [np.asarray(snake_case__, dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(snake_case__, np.ndarray):
            lowercase: str = np.asarray(snake_case__, dtype=np.floataa)
        elif isinstance(snake_case__, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            lowercase: Dict = raw_speech.astype(np.floataa)

        # always return batch
        if not is_batched:
            lowercase: Optional[int] = [np.asarray(snake_case__)]

        # convert to mel spectrogram, truncate and pad if needed.
        lowercase: str = [
            self._get_input_mel(snake_case__, max_length if max_length else self.nb_max_samples, snake_case__, snake_case__)
            for waveform in raw_speech
        ]
        lowercase: Optional[int] = []
        lowercase: Union[str, Any] = []
        for mel, longer in padded_inputs:
            input_mel.append(snake_case__)
            is_longer.append(snake_case__)

        if truncation == "fusion" and sum(snake_case__) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            lowercase: Dict = np.random.randint(0, len(snake_case__))
            lowercase: Tuple = True

        if isinstance(input_mel[0], snake_case__):
            lowercase: Union[str, Any] = [np.asarray(snake_case__, dtype=np.floataa) for feature in input_mel]

        # is_longer is a list of bool
        lowercase: List[Any] = [[longer] for longer in is_longer]

        lowercase: List[str] = {'''input_features''': input_mel, '''is_longer''': is_longer}
        lowercase: Dict = BatchFeature(snake_case__)

        if return_tensors is not None:
            lowercase: Optional[int] = input_features.convert_to_tensors(snake_case__)

        return input_features
677
"""Unit tests for the Flax RegNet model (tester harness + integration test).

NOTE(review): machine-obfuscated test file — parameters renamed to
``snake_case__`` (duplicate names in several signatures, a SyntaxError) and
locals to ``lowercase``; all class names collapsed to ``__magic_name__`` and
method names to ``__snake_case``, so later definitions shadow earlier ones.
Docstrings below describe the intent recoverable from each body.
"""
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor

if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class __magic_name__(unittest.TestCase):
    """Model tester: builds tiny RegNet configs/inputs and shape-checks outputs
    (intended upstream name: FlaxRegNetModelTester)."""

    def __init__(self: List[Any], snake_case__: Optional[int], snake_case__: List[str] = 3, snake_case__: int = 3_2, snake_case__: int = 3, snake_case__: str = 1_0, snake_case__: str = [1_0, 2_0, 3_0, 4_0], snake_case__: int = [1, 1, 2, 1], snake_case__: List[Any] = True, snake_case__: Tuple = True, snake_case__: Optional[Any] = "relu", snake_case__: Optional[int] = 3, snake_case__: Optional[Any] = None, ):
        """Record the tiny-model hyperparameters used by every check."""
        lowercase: Union[str, Any] = parent
        lowercase: Optional[Any] = batch_size
        lowercase: Dict = image_size
        lowercase: Any = num_channels
        lowercase: List[str] = embeddings_size
        lowercase: Union[str, Any] = hidden_sizes
        lowercase: Any = depths
        lowercase: Dict = is_training
        lowercase: Any = use_labels
        lowercase: Any = hidden_act
        lowercase: List[str] = num_labels
        lowercase: List[Any] = scope
        lowercase: int = len(snake_case__)

    def __snake_case(self: Any):
        """Random pixel values + config (intended: prepare_config_and_inputs)."""
        lowercase: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        lowercase: Union[str, Any] = self.get_config()
        return config, pixel_values

    def __snake_case(self: Dict):
        """Tiny RegNetConfig from the recorded hyperparameters (intended: get_config)."""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def __snake_case(self: str, snake_case__: Tuple, snake_case__: List[Any]):
        """Shape-check the base model's last hidden state (intended: create_and_check_model)."""
        lowercase: Any = FlaxRegNetModel(config=snake_case__)
        lowercase: str = model(snake_case__)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def __snake_case(self: List[str], snake_case__: Optional[int], snake_case__: str):
        """Shape-check the classification head's logits (intended: create_and_check_for_image_classification)."""
        lowercase: Tuple = self.num_labels
        lowercase: str = FlaxRegNetForImageClassification(config=snake_case__)
        lowercase: Union[str, Any] = model(snake_case__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def __snake_case(self: str):
        """(config, inputs_dict) pair (intended: prepare_config_and_inputs_for_common)."""
        lowercase: int = self.prepare_config_and_inputs()
        lowercase, lowercase: Tuple = config_and_inputs
        lowercase: Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_flax
class __magic_name__(__UpperCAmelCase, unittest.TestCase):
    """Common-model-test suite for Flax RegNet.

    NOTE(review): the first base was obfuscated to the undefined
    ``__UpperCAmelCase`` — upstream this mixes in ``FlaxModelTesterMixin``.
    """

    __A: List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    __A: str = False
    __A: Tuple = False
    __A: Dict = False

    def __snake_case(self: Optional[Any]):
        """Per-test setup: model tester + config tester (intended: setUp)."""
        lowercase: Dict = FlaxRegNetModelTester(self)
        lowercase: Tuple = ConfigTester(self, config_class=snake_case__, has_text_modality=snake_case__)

    def __snake_case(self: Union[str, Any]):
        """Run the full ConfigTester battery (intended: test_config)."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __snake_case(self: List[Any]):
        """Intentionally empty hook (common-properties check not applicable)."""
        return

    def __snake_case(self: str):
        """Exercise the base-model shape check (intended: test_model)."""
        lowercase: Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__)

    def __snake_case(self: List[str]):
        """Exercise the classification-head check (intended: test_for_image_classification)."""
        lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__)

    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def __snake_case(self: Tuple):
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def __snake_case(self: List[Any]):
        pass

    def __snake_case(self: List[Any]):
        """Check the forward signature starts with `pixel_values` (intended: test_forward_signature)."""
        lowercase, lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase: Union[str, Any] = model_class(snake_case__)
            lowercase: int = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase: Tuple = [*signature.parameters.keys()]
            lowercase: Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], snake_case__)

    def __snake_case(self: Tuple):
        """Check number of hidden states via kwarg and via config (intended: test_hidden_states_output)."""

        def check_hidden_states_output(snake_case__: Union[str, Any], snake_case__: Optional[Any], snake_case__: Optional[int]):
            lowercase: int = model_class(snake_case__)
            lowercase: Tuple = model(**self._prepare_for_class(snake_case__, snake_case__))
            lowercase: Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase: Dict = self.model_tester.num_stages
            self.assertEqual(len(snake_case__), expected_num_stages + 1)

        lowercase, lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase: Optional[int] = True
            check_hidden_states_output(snake_case__, snake_case__, snake_case__)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase: str = True
            check_hidden_states_output(snake_case__, snake_case__, snake_case__)

    def __snake_case(self: List[Any]):
        """Check jitted vs non-jitted outputs agree in shape (intended: test_jit_compilation)."""
        lowercase, lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                lowercase: Optional[Any] = self._prepare_for_class(snake_case__, snake_case__)
                lowercase: List[Any] = model_class(snake_case__)

                @jax.jit
                def model_jitted(snake_case__: str, **snake_case__: Optional[int]):
                    return model(pixel_values=snake_case__, **snake_case__)

                with self.subTest('''JIT Enabled'''):
                    lowercase: Optional[int] = model_jitted(**snake_case__).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        lowercase: Optional[int] = model_jitted(**snake_case__).to_tuple()

                self.assertEqual(len(snake_case__), len(snake_case__))
                for jitted_output, output in zip(snake_case__, snake_case__):
                    self.assertEqual(jitted_output.shape, output.shape)


def lowerCamelCase() -> Tuple:
    """Load the fixture COCO image (intended: prepare_img)."""
    lowercase: Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_flax
class __magic_name__(unittest.TestCase):
    """Slow integration test against the pretrained facebook/regnet-y-040 checkpoint."""

    @cached_property
    def __snake_case(self: int):
        """Pretrained image processor, or None without vision deps (intended: default_image_processor)."""
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None

    @slow
    def __snake_case(self: List[str]):
        """End-to-end logits check on the fixture image (intended: test_inference_image_classification_head)."""
        lowercase: int = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
        lowercase: Optional[Any] = self.default_image_processor
        lowercase: Dict = prepare_img()
        lowercase: Any = image_processor(images=snake_case__, return_tensors='''np''')
        lowercase: List[str] = model(**snake_case__)
        # verify the logits
        lowercase: Any = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape, snake_case__)
        lowercase: List[Any] = jnp.array([-0.41_80, -1.50_51, -3.48_36])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], snake_case__, atol=1e-4))
677
1
"""Unit tests for the TensorFlow ResNet model (tester harness + integration test).

NOTE(review): machine-obfuscated test file — parameters renamed to
``snake_case__`` (duplicate names in several signatures, a SyntaxError) and
locals to ``lowercase``; all class names collapsed to ``__magic_name__`` and
method names to ``__snake_case``, so later definitions shadow earlier ones.
Docstrings below describe the intent recoverable from each body.
"""
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class __magic_name__:
    """Model tester: builds tiny ResNet configs/inputs and shape-checks outputs
    (intended upstream name: TFResNetModelTester)."""

    def __init__(self: Any, snake_case__: Union[str, Any], snake_case__: int = 3, snake_case__: List[Any] = 3_2, snake_case__: int = 3, snake_case__: Optional[int] = 1_0, snake_case__: str = [1_0, 2_0, 3_0, 4_0], snake_case__: List[str] = [1, 1, 2, 1], snake_case__: Tuple = True, snake_case__: int = True, snake_case__: List[str] = "relu", snake_case__: List[Any] = 3, snake_case__: Any = None, ):
        """Record the tiny-model hyperparameters used by every check."""
        lowercase: Any = parent
        lowercase: Dict = batch_size
        lowercase: Any = image_size
        lowercase: Optional[int] = num_channels
        lowercase: int = embeddings_size
        lowercase: str = hidden_sizes
        lowercase: int = depths
        lowercase: str = is_training
        lowercase: Dict = use_labels
        lowercase: Any = hidden_act
        lowercase: Union[str, Any] = num_labels
        lowercase: Union[str, Any] = scope
        lowercase: Optional[int] = len(snake_case__)

    def __snake_case(self: Optional[Any]):
        """Random pixel values, optional labels, and a config (intended: prepare_config_and_inputs)."""
        lowercase: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        lowercase: List[Any] = None
        if self.use_labels:
            lowercase: Dict = ids_tensor([self.batch_size], self.num_labels)
        lowercase: Tuple = self.get_config()
        return config, pixel_values, labels

    def __snake_case(self: int):
        """Tiny ResNetConfig from the recorded hyperparameters (intended: get_config)."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def __snake_case(self: Optional[int], snake_case__: Optional[Any], snake_case__: List[Any], snake_case__: List[Any]):
        """Shape-check the base model's last hidden state (intended: create_and_check_model)."""
        lowercase: Any = TFResNetModel(config=snake_case__)
        lowercase: Dict = model(snake_case__)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def __snake_case(self: Optional[int], snake_case__: Dict, snake_case__: Any, snake_case__: Dict):
        """Shape-check the classification head's logits (intended: create_and_check_for_image_classification)."""
        lowercase: int = self.num_labels
        lowercase: Dict = TFResNetForImageClassification(snake_case__)
        lowercase: Optional[Any] = model(snake_case__, labels=snake_case__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def __snake_case(self: Dict):
        """(config, inputs_dict) pair (intended: prepare_config_and_inputs_for_common)."""
        lowercase: List[Any] = self.prepare_config_and_inputs()
        lowercase, lowercase, lowercase: List[Any] = config_and_inputs
        lowercase: Optional[int] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_tf
class __magic_name__(__UpperCAmelCase, __UpperCAmelCase, unittest.TestCase):
    """Common-model-test suite for TF ResNet.

    NOTE(review): both mixin bases were obfuscated to the undefined
    ``__UpperCAmelCase`` — upstream these are ``TFModelTesterMixin`` and
    ``PipelineTesterMixin`` (imported above).
    """

    __A: Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    # Pipeline mapping (intended upstream name: pipeline_model_mapping).
    __A: List[Any] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    __A: Union[str, Any] = False
    __A: str = False
    __A: List[str] = False
    __A: str = False
    __A: str = False

    def __snake_case(self: Any):
        """Per-test setup: model tester + config tester (intended: setUp)."""
        lowercase: int = TFResNetModelTester(self)
        lowercase: Optional[int] = ConfigTester(self, config_class=snake_case__, has_text_modality=snake_case__)

    def __snake_case(self: Tuple):
        """Run the full ConfigTester battery (intended: test_config)."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __snake_case(self: int):
        """Intentionally empty hook (common-properties check not applicable)."""
        return

    @unittest.skip(reason='''ResNet does not use inputs_embeds''')
    def __snake_case(self: int):
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''')
    def __snake_case(self: List[str]):
        pass

    def __snake_case(self: Any):
        """Check the forward signature starts with `pixel_values` (intended: test_forward_signature)."""
        lowercase, lowercase: Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase: Union[str, Any] = model_class(snake_case__)
            lowercase: Dict = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase: str = [*signature.parameters.keys()]
            lowercase: Optional[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], snake_case__)

    def __snake_case(self: List[Any]):
        """Exercise the base-model shape check (intended: test_model)."""
        lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__)

    def __snake_case(self: Optional[int]):
        """Check hidden-state count/shape for both layer types, via kwarg and config
        (intended: test_hidden_states_output)."""

        def check_hidden_states_output(snake_case__: Optional[Any], snake_case__: Dict, snake_case__: List[Any]):
            lowercase: int = model_class(snake_case__)
            lowercase: Any = model(**self._prepare_for_class(snake_case__, snake_case__))
            lowercase: Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase: List[Any] = self.model_tester.num_stages
            self.assertEqual(len(snake_case__), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        lowercase, lowercase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase: Tuple = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase: List[Any] = layer_type
                lowercase: List[Any] = True
                check_hidden_states_output(snake_case__, snake_case__, snake_case__)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase: str = True
                check_hidden_states_output(snake_case__, snake_case__, snake_case__)

    def __snake_case(self: Tuple):
        """Exercise the classification-head check (intended: test_for_image_classification)."""
        lowercase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__)

    @slow
    def __snake_case(self: List[Any]):
        """Smoke-load the first pretrained checkpoint (intended: test_model_from_pretrained)."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase: int = TFResNetModel.from_pretrained(snake_case__)
            self.assertIsNotNone(snake_case__)


def lowerCamelCase() -> List[Any]:
    """Load the fixture COCO image (intended: prepare_img)."""
    lowercase: List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_tf
@require_vision
class __magic_name__(unittest.TestCase):
    """Slow integration test against the first pretrained TF ResNet checkpoint."""

    @cached_property
    def __snake_case(self: Optional[int]):
        """Pretrained image processor, or None without vision deps (intended: default_image_processor)."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def __snake_case(self: str):
        """End-to-end logits check on the fixture image (intended: test_inference_image_classification_head)."""
        lowercase: Union[str, Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        lowercase: Union[str, Any] = self.default_image_processor
        lowercase: Dict = prepare_img()
        lowercase: int = image_processor(images=snake_case__, return_tensors='''tf''')
        # forward pass
        lowercase: List[Any] = model(**snake_case__)
        # verify the logits
        lowercase: Tuple = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, snake_case__)
        lowercase: str = tf.constant([-11.10_69, -9.78_77, -8.37_77])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), snake_case__, atol=1e-4))
677
"""simple docstring""" UpperCAmelCase = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def lowerCamelCase (a_ :dict , a_ :List[str] , a_ :Tuple) -> list[str]: lowercase :str = set() # keep track of all the paths to be checked lowercase :Dict = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowercase :Optional[int] = queue.pop(0) # get the last node from the path lowercase :Any = path[-1] if node not in explored: lowercase :int = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowercase :List[Any] = list(a_) new_path.append(a_) queue.append(a_) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(a_) # in case there's no path between the 2 nodes return [] def lowerCamelCase (a_ :dict , a_ :List[Any] , a_ :List[Any]) -> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowercase :List[str] = [start] lowercase :Optional[Any] = set(a_) # Keep tab on distances from `start` node. lowercase :Union[str, Any] = {start: 0, target: -1} while queue: lowercase :Union[str, Any] = queue.pop(0) if node == target: lowercase :Any = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node]) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(a_) queue.append(a_) lowercase :Dict = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
677
1
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase (a_ :str , a_ :str , a_ :str) -> Any: def get_masked_lm_array(a_ :str): lowercase :Tuple = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase :Dict = tf.train.load_variable(a_ , a_) if "kernel" in name: lowercase :Optional[Any] = array.transpose() return torch.from_numpy(a_) def get_encoder_array(a_ :str): lowercase :Union[str, Any] = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase :int = tf.train.load_variable(a_ , a_) if "kernel" in name: lowercase :int = array.transpose() return torch.from_numpy(a_) def get_encoder_layer_array(a_ :int , a_ :str): lowercase :str = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase :Dict = tf.train.load_variable(a_ , a_) if "kernel" in name: lowercase :Any = array.transpose() return torch.from_numpy(a_) def get_encoder_attention_layer_array(a_ :int , a_ :str , a_ :Any): lowercase :Any = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase :int = tf.train.load_variable(a_ , a_) lowercase :Optional[int] = array.reshape(a_) if "kernel" in name: lowercase :Union[str, Any] = array.transpose() return torch.from_numpy(a_) print(F"""Loading model based on config from {config_path}...""") lowercase :List[Any] = BertConfig.from_json_file(a_) lowercase :List[Any] = BertForMaskedLM(a_) # Layers for layer_index in range(0 , config.num_hidden_layers): lowercase :BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase :BertSelfAttention = layer.attention.self lowercase :List[str] = get_encoder_attention_layer_array( a_ , '''_query_dense/kernel''' , 
self_attn.query.weight.data.shape) lowercase :Tuple = get_encoder_attention_layer_array( a_ , '''_query_dense/bias''' , self_attn.query.bias.data.shape) lowercase :Optional[int] = get_encoder_attention_layer_array( a_ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape) lowercase :List[Any] = get_encoder_attention_layer_array( a_ , '''_key_dense/bias''' , self_attn.key.bias.data.shape) lowercase :List[str] = get_encoder_attention_layer_array( a_ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape) lowercase :Union[str, Any] = get_encoder_attention_layer_array( a_ , '''_value_dense/bias''' , self_attn.value.bias.data.shape) # Self-attention Output lowercase :BertSelfOutput = layer.attention.output lowercase :List[str] = get_encoder_attention_layer_array( a_ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape) lowercase :Any = get_encoder_attention_layer_array( a_ , '''_output_dense/bias''' , self_output.dense.bias.data.shape) lowercase :Optional[int] = get_encoder_layer_array(a_ , '''_attention_layer_norm/gamma''') lowercase :Dict = get_encoder_layer_array(a_ , '''_attention_layer_norm/beta''') # Intermediate lowercase :BertIntermediate = layer.intermediate lowercase :List[str] = get_encoder_layer_array(a_ , '''_intermediate_dense/kernel''') lowercase :Optional[Any] = get_encoder_layer_array(a_ , '''_intermediate_dense/bias''') # Output lowercase :BertOutput = layer.output lowercase :List[str] = get_encoder_layer_array(a_ , '''_output_dense/kernel''') lowercase :str = get_encoder_layer_array(a_ , '''_output_dense/bias''') lowercase :Any = get_encoder_layer_array(a_ , '''_output_layer_norm/gamma''') lowercase :List[str] = get_encoder_layer_array(a_ , '''_output_layer_norm/beta''') # Embeddings lowercase :Tuple = get_encoder_array('''_position_embedding_layer/embeddings''') lowercase :Any = get_encoder_array('''_type_embedding_layer/embeddings''') lowercase :Tuple = get_encoder_array('''_embedding_norm_layer/gamma''') lowercase 
:Optional[Any] = get_encoder_array('''_embedding_norm_layer/beta''') # LM Head lowercase :Any = model.cls.predictions.transform lowercase :List[str] = get_masked_lm_array('''dense/kernel''') lowercase :Optional[Any] = get_masked_lm_array('''dense/bias''') lowercase :List[str] = get_masked_lm_array('''layer_norm/gamma''') lowercase :List[str] = get_masked_lm_array('''layer_norm/beta''') lowercase :str = get_masked_lm_array('''embedding_table''') # Pooling lowercase :str = BertPooler(config=a_) lowercase :BertPooler = get_encoder_array('''_pooler_layer/kernel''') lowercase :BertPooler = get_encoder_array('''_pooler_layer/bias''') # Export final model model.save_pretrained(a_) # Integration test - should load without any errors ;) lowercase :Dict = BertForMaskedLM.from_pretrained(a_) print(new_model.eval()) print('''Model conversion was done sucessfully!''') if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model.''', ) UpperCAmelCase = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
677
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCAmelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase (a_ :str , a_ :List[str]=100 , a_ :Optional[Any]=" ") -> List[str]: lowercase :str = text.split(a_) return [character.join(text[i : i + n]).strip() for i in range(0 , len(a_) , a_)] def lowerCamelCase (a_ :dict) -> dict: lowercase , lowercase :str = [], [] for title, text in zip(documents['''title'''] , documents['''text''']): if text is not None: for passage in split_text(a_): titles.append(title if title is not None else '''''') texts.append(a_) return {"title": titles, "text": texts} def lowerCamelCase (a_ :dict , a_ :DPRContextEncoder , a_ :DPRContextEncoderTokenizerFast) -> dict: lowercase :Tuple = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=a_ , padding='''longest''' , return_tensors='''pt''')['''input_ids'''] lowercase :Optional[Any] = ctx_encoder(input_ids.to(device=a_) , return_dict=a_).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase (a_ :"RagExampleArguments" , a_ :"ProcessingArguments" , a_ :"IndexHnswArguments" , ) -> Any: ###################################### logger.info('''Step 1 - Create the dataset''') ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with 
columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase :List[Any] = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text''']) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase :Optional[Any] = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc) # And compute the embeddings lowercase :str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a_) lowercase :Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) lowercase :str = Features( {'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}) # optional, save as float32 instead of float64 to save space lowercase :Optional[Any] = dataset.map( partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , ) # And finally save your dataset lowercase :str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''') dataset.save_to_disk(a_) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''') ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase :str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index('''embeddings''' , custom_index=a_) # And save the index lowercase :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''') 
dataset.get_index('''embeddings''').save(a_) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __magic_name__ : __A : str = field( default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) __A : Optional[str] = field( default=__UpperCAmelCase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) __A : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) __A : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) __A : Optional[str] = field( default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class __magic_name__ : __A : Optional[int] = field( default=__UpperCAmelCase , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) __A : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class __magic_name__ : __A : int = field( default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) __A : int = field( default=1_28 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." 
) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCAmelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
677
1
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def lowerCamelCase (a_ :str) -> None: lowercase , lowercase :str = analyze_text(a_) lowercase :int = list(''' ''' + ascii_lowercase) # what is our total sum of probabilities. lowercase :int = sum(single_char_strings.values()) # one length string lowercase :Optional[Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowercase :int = single_char_strings[ch] lowercase :Dict = my_str / all_sum my_fir_sum += prob * math.loga(a_) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum):.1f}""") # two len string lowercase :List[str] = sum(two_char_strings.values()) lowercase :Optional[Any] = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowercase :Any = cha + cha if sequence in two_char_strings: lowercase :List[str] = two_char_strings[sequence] lowercase :Union[str, Any] = int(a_) / all_sum my_sec_sum += prob * math.loga(a_) # print second entropy print(F"""{round(-1 * my_sec_sum):.1f}""") # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""") def lowerCamelCase (a_ :str) -> tuple[dict, dict]: lowercase :Union[str, Any] = Counter() # type: ignore lowercase :List[str] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(a_) - 1): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def lowerCamelCase () -> List[str]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. 
Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
677
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCAmelCase = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
677
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : def __init__( self : int , snake_case__ : Optional[int] , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Dict=True , snake_case__ : List[str]=True , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=9_9 , snake_case__ : List[Any]=3_2 , snake_case__ : Dict=2 , snake_case__ : Dict=4 , snake_case__ : str=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : int=2 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : Optional[int]=4 , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=1_0_0_0 , ): '''simple docstring''' lowercase :Optional[int] = parent lowercase :str = batch_size lowercase :List[str] = seq_length lowercase :List[str] = is_training lowercase :str = use_input_mask lowercase :Optional[int] = use_token_type_ids lowercase :str = use_labels lowercase :Dict = vocab_size lowercase :Dict = hidden_size lowercase :Dict = num_hidden_layers lowercase :Optional[int] = num_attention_heads lowercase :List[str] = intermediate_size lowercase :Any = hidden_act lowercase :Optional[Any] = hidden_dropout_prob lowercase :int = attention_probs_dropout_prob 
lowercase :List[str] = max_position_embeddings lowercase :Tuple = type_vocab_size lowercase :Optional[int] = type_sequence_label_size lowercase :str = initializer_range lowercase :Dict = num_labels lowercase :Optional[int] = num_choices lowercase :List[str] = scope lowercase :Dict = range_bbox def __snake_case ( self : Optional[Any] ): '''simple docstring''' lowercase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowercase :int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase :List[Any] = bbox[i, j, 3] lowercase :int = bbox[i, j, 1] lowercase :Any = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase :Optional[int] = bbox[i, j, 2] lowercase :Union[str, Any] = bbox[i, j, 0] lowercase :Optional[Any] = t lowercase :int = tf.convert_to_tensor(snake_case__ ) lowercase :List[Any] = None if self.use_input_mask: lowercase :Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowercase :Dict = None if self.use_token_type_ids: lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase :List[str] = None lowercase :Optional[Any] = None lowercase :Dict = None if self.use_labels: lowercase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase :Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase :Optional[int] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case ( self : Dict , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :int = TFLayoutLMModel(config=snake_case__ ) lowercase :List[Any] = model(snake_case__ , snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) lowercase :Optional[Any] = model(snake_case__ , snake_case__ , token_type_ids=snake_case__ ) lowercase :Any = model(snake_case__ , snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __snake_case ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[Any] ): '''simple docstring''' lowercase :Optional[int] = TFLayoutLMForMaskedLM(config=snake_case__ ) lowercase :str = model(snake_case__ , snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[Any] ): '''simple docstring''' lowercase :Optional[int] = self.num_labels 
lowercase :Tuple = TFLayoutLMForSequenceClassification(config=snake_case__ ) lowercase :str = model(snake_case__ , snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ): '''simple docstring''' lowercase :Optional[int] = self.num_labels lowercase :Union[str, Any] = TFLayoutLMForTokenClassification(config=snake_case__ ) lowercase :List[str] = model(snake_case__ , snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Tuple ): '''simple docstring''' lowercase :int = TFLayoutLMForQuestionAnswering(config=snake_case__ ) lowercase :List[Any] = model(snake_case__ , snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self : Dict ): '''simple docstring''' lowercase :str = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) :List[Any] = config_and_inputs lowercase :Union[str, Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict 
# Test suite for the TensorFlow LayoutLM models. Model/head classes, mixins and
# helpers (TFLayoutLMModel, is_tf_available, ConfigTester, ...) are imported in
# a part of the file outside this chunk.
# NOTE(review): identifiers such as `__magic_name__`, `__A`, `lowercase` and
# `snake_case__` are machine-mangled. Several references are unresolved in this
# form (e.g. `snake_case__` used inside methods that declare no such parameter,
# `model`/`outputs`/`loss` used after assigning to `lowercase`). They are kept
# byte-identical here on purpose — verify against the upstream source.
@require_tf
class __magic_name__(__UpperCAmelCase, __UpperCAmelCase, unittest.TestCase):
    # All TF LayoutLM model classes exercised by the shared model tests.
    __A: int = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the shared pipeline tests.
    __A: List[Any] = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the three flags below rebind the same mangled name `__A`;
    # in the original they were distinct test-mixin configuration attributes.
    __A: int = False
    __A: List[str] = True
    __A: List[Any] = 10

    def __snake_case(self: List[Any]):
        """Create the shared model-tester and config-tester fixtures (setUp)."""
        lowercase: List[Any] = TFLayoutLMModelTester(self)
        # NOTE(review): `config_class=snake_case__` is unresolved here —
        # presumably it was `config_class=LayoutLMConfig`; confirm upstream.
        lowercase: Tuple = ConfigTester(self, config_class=snake_case__, hidden_size=3_7)

    def __snake_case(self: str):
        """Run the generic configuration sanity checks."""
        self.config_tester.run_common_tests()

    def __snake_case(self: Optional[Any]):
        """Exercise the base model's forward pass and output shapes."""
        lowercase: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__)

    def __snake_case(self: List[str]):
        """Exercise the masked-LM head."""
        lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case__)

    def __snake_case(self: Union[str, Any]):
        """Exercise the sequence-classification head."""
        lowercase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case__)

    def __snake_case(self: str):
        """Exercise the token-classification head."""
        lowercase: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case__)

    def __snake_case(self: Dict):
        """Exercise the extractive question-answering head."""
        lowercase: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case__)

    @slow
    def __snake_case(self: Optional[Any]):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase: Tuple = TFLayoutLMModel.from_pretrained(snake_case__)
            self.assertIsNotNone(snake_case__)

    @unittest.skip('''Onnx compliancy broke with TF 2.10''')
    def __snake_case(self: int):
        """Intentionally skipped — see the decorator for the reason."""
        pass


def lowerCamelCase() -> Union[str, Any]:
    """Build a fixed 2-sequence LayoutLM batch: ids, mask, boxes, type ids, labels."""
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    lowercase: List[Any] = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]])  # noqa: E231
    lowercase: Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    lowercase: Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    lowercase: Union[str, Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    lowercase: Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    # NOTE(review): the names below are unresolved in this mangled form — each
    # tensor above was bound to `lowercase`; in the original they were the five
    # names returned here.
    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class __magic_name__(unittest.TestCase):
    # Slow integration tests against the `microsoft/layoutlm-base-uncased`
    # checkpoint (network access required, hence @slow).

    @slow
    def __snake_case(self: Union[str, Any]):
        """Check base-model hidden states and pooled output against golden values."""
        lowercase: Any = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''')
        lowercase, lowercase, lowercase, lowercase, lowercase: Any = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase: Optional[int] = model(input_ids=snake_case__, bbox=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__)
        # test the sequence output on [0, :3, :3]
        lowercase: int = tf.convert_to_tensor(
            [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], snake_case__, atol=1e-3))
        # test the pooled output on [1, :3]
        lowercase: str = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], snake_case__, atol=1e-3))

    @slow
    def __snake_case(self: List[str]):
        """Check sequence-classification loss and logits shapes."""
        lowercase: Tuple = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''', num_labels=2)
        lowercase, lowercase, lowercase, lowercase, lowercase: List[str] = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase: List[str] = model(
            input_ids=snake_case__, bbox=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__, labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        lowercase: Dict = outputs.loss
        lowercase: List[str] = (2,)
        self.assertEqual(loss.shape, snake_case__)
        # test the shape of the logits
        lowercase: Dict = outputs.logits
        lowercase: Union[str, Any] = (2, 2)
        self.assertEqual(logits.shape, snake_case__)

    @slow
    def __snake_case(self: Optional[int]):
        """Check token-classification logits shape (batch 2, seq 25, 13 labels)."""
        lowercase: Tuple = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''', num_labels=1_3)
        lowercase, lowercase, lowercase, lowercase, lowercase: List[str] = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase: Dict = model(
            input_ids=snake_case__, bbox=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__, labels=snake_case__)
        # test the shape of the logits
        lowercase: Dict = outputs.logits
        lowercase: List[Any] = tf.convert_to_tensor((2, 2_5, 1_3))
        self.assertEqual(logits.shape, snake_case__)

    @slow
    def __snake_case(self: List[str]):
        """Check question-answering start/end logits shapes."""
        lowercase: List[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''')
        lowercase, lowercase, lowercase, lowercase, lowercase: Union[str, Any] = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase: List[Any] = model(input_ids=snake_case__, bbox=snake_case__, attention_mask=snake_case__, token_type_ids=snake_case__)
        # test the shape of the logits
        lowercase: Dict = tf.convert_to_tensor((2, 2_5))
        self.assertEqual(outputs.start_logits.shape, snake_case__)
        self.assertEqual(outputs.end_logits.shape, snake_case__)
677
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase = logging.get_logger(__name__) class __magic_name__ : def __init__( self : Tuple , snake_case__ : str = None , snake_case__ : uuid.UUID = None , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None ): '''simple docstring''' if not conversation_id: lowercase :List[Any] = uuid.uuida() if past_user_inputs is None: lowercase :Union[str, Any] = [] if generated_responses is None: lowercase :List[str] = [] lowercase :uuid.UUID = conversation_id lowercase :List[str] = past_user_inputs lowercase :List[str] = generated_responses lowercase :Optional[str] = text def __eq__( self : Optional[Any] , snake_case__ : str ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def __snake_case ( self : Optional[int] , snake_case__ : str , snake_case__ : bool = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ f"""with: \"{text}\".""" ) lowercase :List[str] = text else: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ f"""ignored: \"{text}\". 
Set `overwrite` to True to overwrite unprocessed user input""" ) else: lowercase :Optional[int] = text def __snake_case ( self : Any ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowercase :Tuple = None def __snake_case ( self : Tuple , snake_case__ : str ): '''simple docstring''' self.generated_responses.append(snake_case__ ) def __snake_case ( self : Tuple ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Dict ): '''simple docstring''' lowercase :int = f"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): lowercase :Dict = '''user''' if is_user else '''bot''' output += f"""{name} >> {text} \n""" return output @add_end_docstrings( __UpperCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , ) class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ): '''simple docstring''' super().__init__(*snake_case__ , **snake_case__ ) if self.tokenizer.pad_token_id is None: lowercase :Any = self.tokenizer.eos_token def __snake_case ( self : List[Any] , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=None , **snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :str = {} lowercase :List[str] = {} lowercase :Tuple = {} if min_length_for_response is not None: lowercase :Dict = min_length_for_response if minimum_tokens is not None: lowercase :Union[str, Any] = minimum_tokens if "max_length" in generate_kwargs: lowercase :List[Any] = 
generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowercase :Dict = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case__ ) return preprocess_params, forward_params, postprocess_params def __call__( self : List[Any] , snake_case__ : Union[Conversation, List[Conversation]] , snake_case__ : int=0 , **snake_case__ : int ): '''simple docstring''' lowercase :int = super().__call__(snake_case__ , num_workers=snake_case__ , **snake_case__ ) if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) == 1: return outputs[0] return outputs def __snake_case ( self : List[Any] , snake_case__ : Conversation , snake_case__ : Any=3_2 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
""" '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): lowercase :List[str] = self.tokenizer._build_conversation_input_ids(snake_case__ ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowercase :List[str] = self._legacy_parse_and_tokenize(snake_case__ ) if self.framework == "pt": lowercase :int = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowercase :Any = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def __snake_case ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Any=1_0 , **snake_case__ : int ): '''simple docstring''' lowercase :Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length ) lowercase :Optional[Any] = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) lowercase :int = max_length - minimum_tokens lowercase :int = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: lowercase :int = model_inputs['''attention_mask'''][:, -trim:] lowercase :int = model_inputs.pop('''conversation''' ) lowercase :Union[str, Any] = max_length lowercase :Dict = self.model.generate(**snake_case__ , **snake_case__ ) if self.model.config.is_encoder_decoder: lowercase :List[Any] = 1 else: lowercase :Optional[Any] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def __snake_case ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any]=True ): '''simple docstring''' lowercase :Dict = model_outputs['''output_ids'''] lowercase :Dict = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , ) lowercase :Optional[int] = model_outputs['''conversation'''] conversation.mark_processed() 
conversation.append_response(snake_case__ ) return conversation def __snake_case ( self : List[Any] , snake_case__ : Conversation ): '''simple docstring''' lowercase :str = self.tokenizer.eos_token_id lowercase :List[Any] = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) ) if len(snake_case__ ) > self.tokenizer.model_max_length: lowercase :List[Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
677
1
"""simple docstring""" from __future__ import annotations import math class __magic_name__ : def __init__( self : str , snake_case__ : int ): '''simple docstring''' lowercase :str = size # approximate the overall size of segment tree with given value lowercase :Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update lowercase :Dict = [0 for i in range(0 , 4 * size )] lowercase :List[str] = [0 for i in range(0 , 4 * size )] # flag for lazy update def __snake_case ( self : List[Any] , snake_case__ : int ): '''simple docstring''' return idx * 2 def __snake_case ( self : List[Any] , snake_case__ : int ): '''simple docstring''' return idx * 2 + 1 def __snake_case ( self : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : list[int] ): '''simple docstring''' if left_element == right_element: lowercase :str = a[left_element - 1] else: lowercase :Union[str, Any] = (left_element + right_element) // 2 self.build(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ ) self.build(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ ) lowercase :List[Any] = max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] ) def __snake_case ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): '''simple docstring''' if self.flag[idx] is True: lowercase :str = self.lazy[idx] lowercase :Union[str, Any] = False if left_element != right_element: lowercase :Optional[Any] = self.lazy[idx] lowercase :Union[str, Any] = self.lazy[idx] lowercase :Union[str, Any] = True lowercase :int = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowercase :Tuple = val if left_element != right_element: lowercase :str = val lowercase :Union[str, Any] = val lowercase :Dict = True lowercase :Union[str, Any] = True return True 
lowercase :Dict = (left_element + right_element) // 2 self.update(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.update(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase :Dict = max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] ) return True def __snake_case ( self : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): '''simple docstring''' if self.flag[idx] is True: lowercase :Union[str, Any] = self.lazy[idx] lowercase :int = False if left_element != right_element: lowercase :int = self.lazy[idx] lowercase :Any = self.lazy[idx] lowercase :Tuple = True lowercase :Dict = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowercase :Dict = (left_element + right_element) // 2 lowercase :Optional[int] = self.query(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase :List[str] = self.query(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ ) return max(snake_case__ , snake_case__ ) def __str__( self : Union[str, Any] ): '''simple docstring''' return str([self.query(1 , 1 , self.size , snake_case__ , snake_case__ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": UpperCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] UpperCAmelCase = 15 UpperCAmelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
677
"""Project Euler problem 29: count the distinct terms generated by a**b."""


def lowerCamelCase(a_: int = 100) -> int:
    """Count the distinct values of ``a ** b`` for 2 <= a <= a_ and 2 <= b <= a_.

    Args:
        a_: inclusive upper bound for both the base and the exponent.

    Returns:
        The number of distinct powers; 0 when ``a_ < 2``.
    """
    collect_powers = set()
    upper_limit = a_ + 1  # maximum limit: make both loops inclusive of a_
    for base in range(2, upper_limit):
        for exponent in range(2, upper_limit):
            # The set deduplicates coincident powers (e.g. 2**4 == 4**2).
            collect_powers.add(base**exponent)
    return len(collect_powers)


# The __main__ driver below calls `solution`; keep that name bound as an alias
# so the script entry point works.
solution = lowerCamelCase

if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
677
1
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers UpperCAmelCase = float('''nan''') class __magic_name__ : def __init__( self : List[Any] , snake_case__ : Any ): '''simple docstring''' lowercase :Optional[Any] = sys.stdout lowercase :Optional[int] = open(snake_case__ , '''a''' ) def __getattr__( self : Tuple , snake_case__ : Optional[int] ): '''simple docstring''' return getattr(self.stdout , snake_case__ ) def __snake_case ( self : Tuple , snake_case__ : Dict ): '''simple docstring''' self.stdout.write(snake_case__ ) # strip tqdm codes self.file.write(re.sub(r'''^.*\r''' , '''''' , snake_case__ , 0 , re.M ) ) def lowerCamelCase (a_ :str=80 , a_ :Dict=False) -> List[Any]: lowercase :int = [] # deal with critical env vars lowercase :Optional[Any] = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: lowercase :Optional[int] = os.environ.get(a_ , a_) if val is not None: cmd.append(F"""{key}={val}""") # python executable (not always needed if the script is executable) lowercase :str = sys.executable if full_python_path else sys.executable.split('''/''')[-1] 
cmd.append(a_) # now the normal args cmd += list(map(shlex.quote , sys.argv)) # split up into up to MAX_WIDTH lines with shell multi-line escapes lowercase :List[Any] = [] lowercase :List[Any] = '''''' while len(a_) > 0: current_line += F"""{cmd.pop(0)} """ if len(a_) == 0 or len(a_) + len(cmd[0]) + 1 > max_width - 1: lines.append(a_) lowercase :List[Any] = '''''' return "\\\n".join(a_) def lowerCamelCase (a_ :int , a_ :str) -> Optional[int]: # unwrap multi-line input lowercase :Optional[Any] = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd) # remove --output_dir if any and set our own lowercase :List[Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd) args.base_cmd += F""" --output_dir {output_dir}""" # ensure we have --overwrite_output_dir lowercase :Union[str, Any] = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd) def lowerCamelCase (a_ :Dict , a_ :int , a_ :int , a_ :int , a_ :str , a_ :str , a_ :Optional[Any]) -> Dict: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0) return dict( {k: random.uniform(0 , 100) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22])} , ) lowercase :str = subprocess.run(a_ , capture_output=a_ , text=a_) if verbose: print('''STDOUT''' , result.stdout) print('''STDERR''' , result.stderr) # save the streams lowercase :List[str] = variation.replace(''' ''' , '''-''') with open(Path(a_) / F"""log.{prefix}.stdout.txt""" , '''w''') as f: f.write(result.stdout) with open(Path(a_) / F"""log.{prefix}.stderr.txt""" , '''w''') as f: f.write(result.stderr) if result.returncode != 0: if verbose: print('''failed''') return {target_metric_key: nan} with io.open(F"""{output_dir}/all_results.json""" , '''r''' , encoding='''utf-8''') as f: lowercase :List[str] = json.load(a_) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def lowerCamelCase (a_ :Optional[int] , a_ :List[Any] , a_ :str , a_ :List[Any] , a_ :Dict , a_ :Optional[int] , a_ :str , a_ :str , a_ :str , a_ :Optional[Any] , ) -> Any: lowercase :int = [] lowercase :Dict = [] lowercase :Optional[Any] = F"""{id}: {variation:<{longest_variation_len}}""" lowercase :List[Any] = F"""{preamble}: """ lowercase :List[Any] = set(report_metric_keys + [target_metric_key]) for i in tqdm(range(a_) , desc=a_ , leave=a_): lowercase :Optional[Any] = process_run_single( a_ , a_ , a_ , a_ , a_ , a_ , a_) lowercase :Tuple = single_run_metrics[target_metric_key] if not math.isnan(a_): metrics.append(a_) results.append(a_) outcome += "✓" else: outcome += "✘" lowercase :Dict = F"""\33[2K\r{outcome}""" if len(a_) > 0: lowercase :int = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()} lowercase :List[Any] = round(mean_metrics[target_metric_key] , 2) lowercase :Union[str, Any] = F"""{outcome} 
{mean_target}""" if len(a_) > 1: results_str += F""" {tuple(round(a_ , 2) for x in results)}""" print(a_) lowercase :str = variation return mean_metrics else: print(a_) return {variation_key: variation, target_metric_key: nan} def lowerCamelCase () -> Any: lowercase :Dict = torch.cuda.get_device_properties(torch.device('''cuda''')) return F""" Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB """ def lowerCamelCase (a_ :List[Any] , a_ :Optional[int] , a_ :Dict , a_ :Optional[Any] , a_ :Dict) -> Dict: lowercase :Any = pd.DataFrame(a_) lowercase :Tuple = '''variation''' lowercase :int = '''diff_%''' lowercase :Optional[int] = nan if base_variation is not None and len(df[df[variation_key] == base_variation]): # this may still return nan lowercase :Union[str, Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(a_): # as a fallback, use the minimal value as the sentinel lowercase :Dict = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(a_): lowercase :Any = df.apply( lambda a_: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value) if not math.isnan(r[target_metric_key]) else 0 , axis='''columns''' , ) # re-order columns lowercase :Dict = [variation_key, target_metric_key, diff_key, *report_metric_keys] lowercase :Dict = df.reindex(a_ , axis='''columns''') # reorder cols # capitalize lowercase :Tuple = df.rename(str.capitalize , axis='''columns''') # make the cols as narrow as possible lowercase :Tuple = df.rename(lambda a_: c.replace('''_''' , '''<br>''') , axis='''columns''') lowercase :Optional[int] = df.rename(lambda a_: c.replace('''_''' , '''\n''') , axis='''columns''') lowercase :str = 
['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=a_ , floatfmt='''.2f''')] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=a_ , floatfmt='''.2f''')] print('''\n\n'''.join(a_)) def lowerCamelCase () -> Optional[Any]: lowercase :List[Any] = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=a_ , type=a_ , required=a_ , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=a_ , type=a_ , nargs='''+''' , required=a_ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=a_ , type=a_ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=a_ , type=a_ , required=a_ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=a_ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=a_ , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=a_ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=a_ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) lowercase :List[Any] = parser.parse_args() lowercase :Union[str, Any] = args.output_dir Path(a_).mkdir(exist_ok=a_) lowercase :int = get_base_command(a_ , a_) # split each dimension into its --foo variations lowercase :List[str] = [list(map(str.strip , re.split(R'''\|''' , a_))) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty lowercase :Optional[int] = list(map(str.strip , map(''' '''.join , itertools.product(*a_)))) lowercase :str = max(len(a_) for x in variations) # split wanted keys lowercase :List[str] = args.report_metric_keys.split() # capture prints into a log file for convenience lowercase :Tuple = F"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt""" print(F"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""") print(F"""and this script's output is also piped into {report_fn}""") lowercase :Dict = Tee(a_) print(F"""\n*** Running {len(a_)} benchmarks:""") print(F"""Base command: {' '.join(a_)}""") lowercase :int = '''variation''' lowercase :Dict = [] for id, variation in enumerate(tqdm(a_ , desc='''Total completion: ''' , leave=a_)): lowercase :int = base_cmd + variation.split() results.append( process_run( id + 1 , a_ , a_ , a_ , a_ , 
args.target_metric_key , a_ , args.repeat_times , a_ , args.verbose , )) process_results(a_ , args.target_metric_key , a_ , args.base_variation , a_) if __name__ == "__main__": main()
677
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json''' ), } class __magic_name__ ( __UpperCAmelCase ): __A : Optional[Any] = "xlm-prophetnet" __A : List[str] = ["past_key_values"] __A : int = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self : Any , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[Union[str, Callable]] = "gelu" , snake_case__ : Optional[int] = 3_0_5_2_2 , snake_case__ : Optional[int] = 1_0_2_4 , snake_case__ : Optional[int] = 4_0_9_6 , snake_case__ : Optional[int] = 1_2 , snake_case__ : Optional[int] = 1_6 , snake_case__ : Optional[int] = 4_0_9_6 , snake_case__ : Optional[int] = 1_2 , snake_case__ : Optional[int] = 1_6 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[int] = 5_1_2 , snake_case__ : Optional[float] = 0.02 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 2 , snake_case__ : Optional[int] = 3_2 , snake_case__ : Optional[int] = 1_2_8 , snake_case__ : Optional[bool] = False , snake_case__ : Optional[float] = 0.0 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 1 , snake_case__ : Optional[int] = 2 , **snake_case__ : List[str] , ): '''simple docstring''' lowercase :Tuple = vocab_size lowercase :Optional[int] = hidden_size lowercase :Optional[int] = encoder_ffn_dim lowercase :Optional[int] = num_encoder_layers lowercase :Dict = num_encoder_attention_heads lowercase :List[str] = decoder_ffn_dim lowercase :Dict = num_decoder_layers lowercase :List[Any] = num_decoder_attention_heads lowercase 
:Optional[int] = max_position_embeddings lowercase :Tuple = init_std # Normal(0, this parameter) lowercase :int = activation_function # parameters for xlmprophetnet lowercase :Dict = ngram lowercase :Optional[Any] = num_buckets lowercase :Dict = relative_max_distance lowercase :List[Any] = disable_ngram_loss lowercase :Optional[Any] = eps # 3 Types of Dropout lowercase :Any = attention_dropout lowercase :List[str] = activation_dropout lowercase :List[str] = dropout lowercase :List[str] = use_cache super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , ) @property def __snake_case ( self : Any ): '''simple docstring''' return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] ): '''simple docstring''' raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
677
1
"""simple docstring""" import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def lowerCamelCase (a_ :int) -> List[str]: random.seed(a_) np.random.seed(a_) torch.manual_seed(a_) torch.cuda.manual_seed_all(a_) # ^^ safe to call this function even if cuda is not available class __magic_name__ : def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ): '''simple docstring''' if isinstance(snake_case__ , torch.nn.Module ): lowercase :int = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , ) lowercase :Dict = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowercase :Optional[Any] = True if kwargs.get('''max_value''' , snake_case__ ) is not None: lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) lowercase :Optional[int] = kwargs['''max_value'''] if kwargs.get('''min_value''' , snake_case__ ) is not None: lowercase :List[Any] = '''The `min_value` argument is deprecated. 
Please use `min_decay` instead.''' deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) lowercase :str = kwargs['''min_value'''] lowercase :Any = list(snake_case__ ) lowercase :Optional[Any] = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , snake_case__ ) is not None: lowercase :str = '''The `device` argument is deprecated. Please use `to` instead.''' deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) self.to(device=kwargs['''device'''] ) lowercase :int = None lowercase :int = decay lowercase :Union[str, Any] = min_decay lowercase :List[Any] = update_after_step lowercase :Union[str, Any] = use_ema_warmup lowercase :Any = inv_gamma lowercase :Any = power lowercase :str = 0 lowercase :int = None # set in `step()` lowercase :List[str] = model_cls lowercase :Any = model_config @classmethod def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase , lowercase :int = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ ) lowercase :List[Any] = model_cls.from_pretrained(snake_case__ ) lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config ) ema_model.load_state_dict(snake_case__ ) return ema_model def __snake_case ( self : int , snake_case__ : Union[str, Any] ): '''simple docstring''' if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowercase :Dict = self.model_cls.from_config(self.model_config ) lowercase :Tuple = self.state_dict() state_dict.pop('''shadow_params''' , snake_case__ ) model.register_to_config(**snake_case__ ) self.copy_to(model.parameters() ) model.save_pretrained(snake_case__ ) def __snake_case ( self : int , snake_case__ : int ): 
'''simple docstring''' lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power else: lowercase :Dict = (1 + step) / (1_0 + step) lowercase :Optional[int] = min(snake_case__ , self.decay ) # make sure decay is not smaller than min_decay lowercase :Optional[int] = max(snake_case__ , self.min_decay ) return cur_decay_value @torch.no_grad() def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' if isinstance(snake_case__ , torch.nn.Module ): lowercase :Tuple = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , ) lowercase :Union[str, Any] = parameters.parameters() lowercase :Optional[Any] = list(snake_case__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
lowercase :List[Any] = self.get_decay(self.optimization_step ) lowercase :Optional[Any] = decay lowercase :List[Any] = 1 - decay lowercase :List[str] = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , snake_case__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(snake_case__ ) def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' lowercase :Optional[Any] = list(snake_case__ ) for s_param, param in zip(self.shadow_params , snake_case__ ): param.data.copy_(s_param.to(param.device ).data ) def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ): '''simple docstring''' lowercase :str = [ p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ ) for p in self.shadow_params ] def __snake_case ( self : Dict ): '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' lowercase :str = [param.detach().cpu().clone() for param in parameters] def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in 
zip(self.temp_stored_params , snake_case__ ): param.data.copy_(c_param.data ) # Better memory-wise. lowercase :Dict = None def __snake_case ( self : Union[str, Any] , snake_case__ : dict ): '''simple docstring''' lowercase :List[str] = copy.deepcopy(snake_case__ ) lowercase :Any = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowercase :int = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , snake_case__ ): raise ValueError('''Invalid min_decay''' ) lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , snake_case__ ): raise ValueError('''Invalid optimization_step''' ) lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , snake_case__ ): raise ValueError('''Invalid update_after_step''' ) lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , snake_case__ ): raise ValueError('''Invalid use_ema_warmup''' ) lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowercase :Dict = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ ) if shadow_params is not None: lowercase :List[Any] = shadow_params if not isinstance(self.shadow_params , snake_case__ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
677
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
UpperCAmelCase = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( 
FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
677
1