Dataset schema:

| column | type | range |
| --- | --- | --- |
| code | string | lengths 82 – 54.1k |
| code_codestyle | int64 | 0 – 699 |
| style_context | string | lengths 111 – 35.6k |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
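The five columns pair two flattened Python sources with their style-cluster IDs and a binary label. Below is a minimal loading sketch, assuming the rows live in a Hugging Face `datasets` repository; the repository ID `user/code-style-pairs` is a placeholder, and reading `label` as "the two snippets share a style" is an assumption this dump does not state.

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical repository ID -- substitute the real one for this dump.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:80])           # flattened Python source (82 to ~54.1k chars)
print(row["code_codestyle"])      # style-cluster ID in [0, 699]
print(row["style_context"][:80])  # second flattened source (111 to ~35.6k chars)
print(row["label"])               # 0 or 1 (assumed: whether the two share a style)
```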
code:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

lowerCamelCase :Optional[Any] = {
    '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
    '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
    '''processing_mctct''': ['''MCTCTProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :int = [
        '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MCTCTForCTC''',
        '''MCTCTModel''',
        '''MCTCTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    lowerCamelCase :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
```

code_codestyle: 667
style_context:

```python
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPTaTokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    def _a (self ):
        A_ : Union[str, Any] = tempfile.mkdtemp()
        A_ : List[Any] = BlipImageProcessor()
        A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
        processor.save_pretrained(self.tmpdirname )

    def _a (self , **lowercase ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer

    def _a (self , **lowercase ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor

    def _a (self , **lowercase ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer

    def _a (self ):
        shutil.rmtree(self.tmpdirname )

    def _a (self ):
        A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _a (self ):
        A_ : str = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )

        A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )

        A_ : str = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowercase )
        self.assertIsInstance(processor.qformer_tokenizer , lowercase )

    def _a (self ):
        A_ : Any = self.get_image_processor()
        A_ : Union[str, Any] = self.get_tokenizer()
        A_ : List[str] = self.get_qformer_tokenizer()
        A_ : int = InstructBlipProcessor(
            tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )

        A_ : List[Any] = self.prepare_image_inputs()

        A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
        A_ : Dict = processor(images=lowercase , return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def _a (self ):
        A_ : List[Any] = self.get_image_processor()
        A_ : Optional[Any] = self.get_tokenizer()
        A_ : Any = self.get_qformer_tokenizer()
        A_ : List[str] = InstructBlipProcessor(
            tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )

        A_ : str = """lower newer"""
        A_ : List[Any] = processor(text=lowercase )

        A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
        A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )

    def _a (self ):
        A_ : int = self.get_image_processor()
        A_ : Union[str, Any] = self.get_tokenizer()
        A_ : Union[str, Any] = self.get_qformer_tokenizer()
        A_ : Any = InstructBlipProcessor(
            tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )

        A_ : Optional[int] = """lower newer"""
        A_ : Optional[int] = self.prepare_image_inputs()

        A_ : Tuple = processor(text=lowercase , images=lowercase )

        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )

        # test if it raises when no input is passed
        with pytest.raises(lowercase ):
            processor()

    def _a (self ):
        A_ : Dict = self.get_image_processor()
        A_ : str = self.get_tokenizer()
        A_ : Optional[int] = self.get_qformer_tokenizer()
        A_ : int = InstructBlipProcessor(
            tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )

        A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        A_ : Optional[int] = processor.batch_decode(lowercase )
        A_ : Dict = tokenizer.batch_decode(lowercase )

        self.assertListEqual(lowercase , lowercase )

    def _a (self ):
        A_ : Any = self.get_image_processor()
        A_ : Dict = self.get_tokenizer()
        A_ : Union[str, Any] = self.get_qformer_tokenizer()
        A_ : Optional[int] = InstructBlipProcessor(
            tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )

        A_ : List[Any] = """lower newer"""
        A_ : Optional[Any] = self.prepare_image_inputs()

        A_ : Any = processor(text=lowercase , images=lowercase )

        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
```

style_context_codestyle: 667
label: 1
code:

```python
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _lowerCAmelCase ( yaml.SafeLoader ):
    def _a (self , lowercase ):
        A_ : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value]
        A_ : Any = [tuple(lowercase ) if isinstance(lowercase , lowercase ) else key for key in keys]
        A_ : str = Counter(lowercase )
        A_ : Dict = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )

    def _a (self , lowercase , lowercase=False ):
        A_ : Tuple = super().construct_mapping(lowercase , deep=lowercase )
        self._check_no_duplicates_on_constructed_node(lowercase )
        return mapping


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : int = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        A_ : Dict = full_content[1:].index("""---""" ) + 1
        A_ : Tuple = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(lowerCamelCase__ )


class _lowerCAmelCase ( __UpperCAmelCase ):
    # class attributes
    __SCREAMING_SNAKE_CASE : Optional[int] = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def _a (cls , lowercase ):
        with open(lowercase , encoding="""utf-8""" ) as readme_file:
            A_, A_ : Optional[int] = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(lowercase )
        else:
            return cls()

    def _a (self , lowercase ):
        if path.exists():
            with open(lowercase , encoding="""utf-8""" ) as readme_file:
                A_ : Optional[Any] = readme_file.read()
        else:
            A_ : Any = None
        A_ : Optional[Any] = self._to_readme(lowercase )
        with open(lowercase , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(lowercase )

    def _a (self , lowercase = None ):
        if readme_content is not None:
            A_, A_ : Optional[int] = _split_yaml_from_readme(lowercase )
            A_ : Optional[Any] = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            A_ : Any = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content

    @classmethod
    def _a (cls , lowercase ):
        A_ : str = yaml.load(lowercase , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        A_ : Optional[int] = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**lowercase )

    def _a (self ):
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } ,
            sort_keys=lowercase ,
            allow_unicode=lowercase ,
            encoding="""utf-8""" ,
        ).decode("""utf-8""" )


lowerCamelCase :Union[str, Any] = {
    '''image-classification''': [], '''translation''': [], '''image-segmentation''': [],
    '''fill-mask''': [], '''automatic-speech-recognition''': [], '''token-classification''': [],
    '''sentence-similarity''': [], '''audio-classification''': [], '''question-answering''': [],
    '''summarization''': [], '''zero-shot-classification''': [], '''table-to-text''': [],
    '''feature-extraction''': [], '''other''': [], '''multiple-choice''': [],
    '''text-classification''': [], '''text-to-image''': [], '''text2text-generation''': [],
    '''zero-shot-image-classification''': [], '''tabular-classification''': [],
    '''tabular-regression''': [], '''image-to-image''': [], '''tabular-to-text''': [],
    '''unconditional-image-generation''': [], '''text-retrieval''': [], '''text-to-speech''': [],
    '''object-detection''': [], '''audio-to-audio''': [], '''text-generation''': [],
    '''conversational''': [], '''table-question-answering''': [], '''visual-question-answering''': [],
    '''image-to-text''': [], '''reinforcement-learning''': [], '''voice-activity-detection''': [],
    '''time-series-forecasting''': [], '''document-question-answering''': [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    lowerCamelCase :str = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    lowerCamelCase :Optional[int] = ap.parse_args()

    lowerCamelCase :List[Any] = Path(args.readme_filepath)
    lowerCamelCase :Optional[Any] = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
```

code_codestyle: 667
style_context:

```python
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

lowerCamelCase :Optional[Any] = logging.get_logger(__name__)

lowerCamelCase :Tuple = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'

    def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
        super().__init__(**lowercase )

        A_ : int = image_size
        A_ : List[str] = patch_size
        A_ : Tuple = num_channels
        A_ : List[str] = max_token_length
        A_ : int = num_character_labels
        A_ : str = num_bpe_labels
        A_ : Tuple = num_wordpiece_labels
        A_ : Optional[int] = hidden_size
        A_ : List[Any] = num_hidden_layers
        A_ : int = num_attention_heads
        A_ : Tuple = mlp_ratio
        A_ : str = distilled
        A_ : Union[str, Any] = layer_norm_eps
        A_ : str = drop_rate
        A_ : int = qkv_bias
        A_ : Dict = attn_drop_rate
        A_ : List[Any] = drop_path_rate
        A_ : Any = output_aa_attentions
        A_ : Union[str, Any] = initializer_range
```

style_context_codestyle: 667
label: 1
code:

```python
'''simple docstring'''
import argparse
import copy


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Optional[Any] = {}
    with open(lowerCamelCase__ ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                A_ : Tuple = []
                _list.append([line.split()[1], line.split()[2]] )
                A_ : Any = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                A_ : int = []
                _list.append([line.split()[0], line.split()[2]] )
                A_ : Dict = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    with open(lowerCamelCase__ ) as f:
        A_ : int = f.read(1 )
    A_ : int = start_node
    A_ : Any = []
    A_ : Any = start_node
    A_ : Dict = 0

    while visiting not in first_solution:
        A_ : Optional[Any] = 1_00_00
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(lowerCamelCase__ ) and k[0] not in first_solution:
                A_ : Dict = k[1]
                A_ : Optional[Any] = k[0]

        first_solution.append(lowerCamelCase__ )
        A_ : int = distance_of_first_solution + int(lowerCamelCase__ )
        A_ : str = best_node

    first_solution.append(lowerCamelCase__ )

    A_ : str = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    A_ : int = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_00_00
    )
    return first_solution, distance_of_first_solution


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Tuple = []
    for n in solution[1:-1]:
        A_ : List[Any] = solution.index(lowerCamelCase__ )
        for kn in solution[1:-1]:
            A_ : int = solution.index(lowerCamelCase__ )
            if n == kn:
                continue

            A_ : List[Any] = copy.deepcopy(lowerCamelCase__ )
            A_ : Optional[Any] = kn
            A_ : Optional[int] = n

            A_ : Tuple = 0
            for k in _tmp[:-1]:
                A_ : Dict = _tmp[_tmp.index(lowerCamelCase__ ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        A_ : int = distance + int(i[1] )
            _tmp.append(lowerCamelCase__ )

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )

    A_ : int = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda lowerCamelCase__ : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Optional[int] = 1
    A_ : Dict = first_solution
    A_ : Tuple = []
    A_ : Dict = distance_of_first_solution
    A_ : int = solution

    while count <= iters:
        A_ : List[Any] = find_neighborhood(lowerCamelCase__ , lowerCamelCase__ )
        A_ : Optional[int] = 0
        A_ : str = neighborhood[index_of_best_solution]
        A_ : int = len(lowerCamelCase__ ) - 1

        A_ : int = False
        while not found:
            A_ : Optional[int] = 0
            while i < len(lowerCamelCase__ ):
                if best_solution[i] != solution[i]:
                    A_ : Any = best_solution[i]
                    A_ : str = solution[i]
                    break
                A_ : Union[str, Any] = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                A_ : Optional[int] = True
                A_ : List[Any] = best_solution[:-1]
                A_ : Union[str, Any] = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    A_ : Union[str, Any] = cost
                    A_ : Any = solution
            else:
                A_ : int = index_of_best_solution + 1
                A_ : Union[str, Any] = neighborhood[index_of_best_solution]

        if len(lowerCamelCase__ ) >= size:
            tabu_list.pop(0 )

        A_ : Dict = count + 1

    return best_solution_ever, best_cost


def a ( lowerCamelCase__=None ):
    '''simple docstring'''
    A_ : str = generate_neighbours(args.File )
    A_, A_ : Any = generate_first_solution(
        args.File , lowerCamelCase__ )
    A_, A_ : Optional[Any] = tabu_search(
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , args.Iterations , args.Size , )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )


if __name__ == "__main__":
    lowerCamelCase :List[str] = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
```

code_codestyle: 667
style_context:

```python
'''simple docstring'''
import math
from collections.abc import Callable


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : float = xa
    A_ : float = xa
    while True:
        if x_n == x_na or function(lowerCamelCase__ ) == function(lowerCamelCase__ ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        A_ : float = x_na - (
            function(lowerCamelCase__ ) / ((function(lowerCamelCase__ ) - function(lowerCamelCase__ )) / (x_na - x_n))
        )
        if abs(x_na - x_na ) < 10**-5:
            return x_na
        A_ : Tuple = x_na
        A_ : List[Any] = x_na


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    return math.pow(lowerCamelCase__ , 3 ) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
```

style_context_codestyle: 667
label: 1
code:

```python
'''simple docstring'''
import math
from collections.abc import Callable


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : float = xa
    A_ : float = xa
    while True:
        if x_n == x_na or function(lowerCamelCase__ ) == function(lowerCamelCase__ ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        A_ : float = x_na - (
            function(lowerCamelCase__ ) / ((function(lowerCamelCase__ ) - function(lowerCamelCase__ )) / (x_na - x_n))
        )
        if abs(x_na - x_na ) < 10**-5:
            return x_na
        A_ : Tuple = x_na
        A_ : List[Any] = x_na


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    return math.pow(lowerCamelCase__ , 3 ) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
```

code_codestyle: 667
style_context:

```python
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

lowerCamelCase :Tuple = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']

    def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
        super().__init__(**lowercase )
        A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
        A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
        A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )

        A_ : str = do_resize
        A_ : str = size
        A_ : List[str] = resample
        A_ : Any = do_center_crop
        A_ : Union[str, Any] = crop_size
        A_ : List[Any] = do_rescale
        A_ : List[Any] = rescale_factor
        A_ : Dict = do_normalize
        A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
        A_ : Union[str, Any] = do_convert_rgb

    def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
        A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
        return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )

    def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
        A_ : Any = get_size_dict(lowercase )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )

    def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
        return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )

    def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
        return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )

    def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
        A_ : List[str] = do_resize if do_resize is not None else self.do_resize
        A_ : int = size if size is not None else self.size
        A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
        A_ : int = resample if resample is not None else self.resample
        A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        A_ : Any = crop_size if crop_size is not None else self.crop_size
        A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
        A_ : str = do_rescale if do_rescale is not None else self.do_rescale
        A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        A_ : Any = image_mean if image_mean is not None else self.image_mean
        A_ : Any = image_std if image_std is not None else self.image_std
        A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        A_ : List[str] = make_list_of_images(lowercase )

        if not valid_images(lowercase ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            A_ : int = [convert_to_rgb(lowercase ) for image in images]

        # All transformations expect numpy arrays.
        A_ : int = [to_numpy_array(lowercase ) for image in images]

        if do_resize:
            A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]

        if do_center_crop:
            A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]

        if do_rescale:
            A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]

        if do_normalize:
            A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]

        A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]

        A_ : Dict = {"""pixel_values""": images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
```

style_context_codestyle: 667
label: 1
code:

```python
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

lowerCamelCase :str = logging.get_logger(__name__)

lowerCamelCase :List[str] = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[str] = 'camembert'

    def __init__(self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ):
        super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )

        A_ : Tuple = vocab_size
        A_ : Tuple = hidden_size
        A_ : List[str] = num_hidden_layers
        A_ : Tuple = num_attention_heads
        A_ : int = hidden_act
        A_ : Tuple = intermediate_size
        A_ : str = hidden_dropout_prob
        A_ : str = attention_probs_dropout_prob
        A_ : Tuple = max_position_embeddings
        A_ : int = type_vocab_size
        A_ : int = initializer_range
        A_ : Tuple = layer_norm_eps
        A_ : List[Any] = position_embedding_type
        A_ : Any = use_cache
        A_ : Optional[Any] = classifier_dropout


class _lowerCAmelCase ( __UpperCAmelCase ):
    @property
    def _a (self ):
        if self.task == "multiple-choice":
            A_ : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A_ : Optional[int] = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
```

code_codestyle: 667
style_context:

```python
'''simple docstring'''


class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase , lowercase ):
        A_ : List[str] = name
        A_ : Dict = value
        A_ : Optional[int] = weight

    def __repr__(self ):
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def _a (self ):
        return self.value

    def _a (self ):
        return self.name

    def _a (self ):
        return self.weight

    def _a (self ):
        return self.value / self.weight


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Optional[int] = []
    for i in range(len(lowerCamelCase__ ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ )
    A_ : Any = []
    A_, A_ : Tuple = 0.0, 0.0
    for i in range(len(lowerCamelCase__ ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def a ( ):
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context_codestyle: 667
label: 1
code:

```python
'''simple docstring'''
from __future__ import annotations


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : list[list[int]] = []
    create_all_state(1 , lowerCamelCase__ , lowerCamelCase__ , [] , lowerCamelCase__ )
    return result


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
    '''simple docstring'''
    if level == 0:
        total_list.append(current_list[:] )
        return

    for i in range(lowerCamelCase__ , total_number - level + 2 ):
        current_list.append(lowerCamelCase__ )
        create_all_state(i + 1 , lowerCamelCase__ , level - 1 , lowerCamelCase__ , lowerCamelCase__ )
        current_list.pop()


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    for i in total_list:
        print(*lowerCamelCase__ )


if __name__ == "__main__":
    lowerCamelCase :Tuple = 4
    lowerCamelCase :Union[str, Any] = 2
    lowerCamelCase :Optional[Any] = generate_all_combinations(n, k)
    print_all_state(total_list)
```

code_codestyle: 667
style_context:

```python
'''simple docstring'''
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor

lowerCamelCase :int = logging.getLogger(__name__)

lowerCamelCase :List[Any] = 5_0  # max width of layer names
lowerCamelCase :List[Any] = 7_0  # max width of quantizer names


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
    group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
    group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
    group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
    group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
    group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
    group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
    group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
    group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
    group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
    group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
    group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
    group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
    group.add_argument(
        """--recalibrate-weights""" ,
        action="""store_true""" ,
        help=(
            """recalibrate weight amaxes by taking the max of the weights."""
            """ amaxes will be computed with the current quantization granularity (axis)."""
        ) ,
    )


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    if args.calibrator == "max":
        A_ : Union[str, Any] = """max"""
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("""Specify --percentile when using percentile calibrator""" )
        A_ : int = """histogram"""
    elif args.calibrator == "mse":
        A_ : Dict = """histogram"""
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}' )

    A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
    A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
    quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
    '''simple docstring'''
    logger.info("""Configuring Model for Quantization""" )
    logger.info(f'using quantization package {pytorch_quantization.__file__}' )

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
        if args.quant_disable:
            set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
        if args.quant_disable_keyword:
            set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
        if args.recalibrate_weights:
            recalibrate_weights(lowerCamelCase__ )
        if args.fuse_qkv:
            fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
        if args.clip_gelu:
            clip_gelu(lowerCamelCase__ , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(lowerCamelCase__ )


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    logger.info("""Enabling Calibration""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'{name:80}: {module}' )


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    logger.info("""Loading calibrated amax""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("""percentile""" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(lowerCamelCase__ )


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''

    def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        for mod in [qq, qk, qv]:
            if not hasattr(lowerCamelCase__ , """_amax""" ):
                print(""" WARNING: NO AMAX BUFFER""" )
                return
        A_ : List[Any] = qq._amax.detach().item()
        A_ : Optional[int] = qk._amax.detach().item()
        A_ : Dict = qv._amax.detach().item()

        A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        qq._amax.fill_(lowerCamelCase__ )
        qk._amax.fill_(lowerCamelCase__ )
        qv._amax.fill_(lowerCamelCase__ )
        logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )

    for name, mod in model.named_modules():
        if name.endswith(""".attention.self""" ):
            logger.info(f'FUSE_QKV: {name:{name_width}}' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
            A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
            A_ : Dict = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
            A_ : Tuple = mod.weight.shape[0]
            A_ : Dict = mod._weight_quantizer._amax.detach()
            A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
            if not hasattr(mod.weight_quantizer , """_amax""" ):
                print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
            A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            A_ : str = amax


def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
    '''simple docstring'''
    if ignore is None:
        A_ : int = []
    elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        A_ : Union[str, Any] = [ignore]

    A_ : Optional[Any] = 0
    for name, mod in model.named_modules():
        if not hasattr(lowerCamelCase__ , """weight""" ):
            continue
        A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )

    for name, mod in model.named_modules():
        A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
        A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
        if not hasattr(lowerCamelCase__ , """weight""" ):
            continue
        if type(lowerCamelCase__ ) in ignore:
            continue
        if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
            continue

        A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
        A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
        A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(lowerCamelCase__ ) <= line_width:
            logger.info(lowerCamelCase__ )
        else:
            logger.info(f'{name:{name_width}} {act_str}' )
            logger.info(f'{" ":{name_width}} {wgt_str}' )


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : int = 0
    for name, mod in model.named_modules():
        if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
            print(f'{name:80} {mod}' )
            count += 1
    print(f'{count} TensorQuantizers found in model' )


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    if quantizer_mod is not None:
        assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
        setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    else:
        logger.warning(f'{name} has no {quantizer}' )


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
    '''simple docstring'''
    A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
    if which in ["input", "both"]:
        set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
    if which in ["weight", "both"]:
        set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
    logger.info(lowerCamelCase__ )


def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
            for n in names:
                if re.search(lowerCamelCase__ , lowerCamelCase__ ):
                    set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
        elif name.endswith("""_quantizer""" ):
            for n in names:
                if re.search(lowerCamelCase__ , lowerCamelCase__ ):
                    A_ : Dict = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                    setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                    logger.info(lowerCamelCase__ )
```

style_context_codestyle: 667
label: 1
code:

```python
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[str] = 'ClapFeatureExtractor'
    __SCREAMING_SNAKE_CASE : Dict = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self , lowercase , lowercase ):
        super().__init__(lowercase , lowercase )

    def __call__(self , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
        A_ : Optional[int] = kwargs.pop("""sampling_rate""" , lowercase )

        if text is None and audios is None:
            raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )

        if text is not None:
            A_ : List[Any] = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )

        if audios is not None:
            A_ : Optional[int] = self.feature_extractor(
                lowercase , sampling_rate=lowercase , return_tensors=lowercase , **lowercase )

        if text is not None and audios is not None:
            A_ : int = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )

    def _a (self , *lowercase , **lowercase ):
        return self.tokenizer.batch_decode(*lowercase , **lowercase )

    def _a (self , *lowercase , **lowercase ):
        return self.tokenizer.decode(*lowercase , **lowercase )

    @property
    def _a (self ):
        A_ : List[Any] = self.tokenizer.model_input_names
        A_ : Tuple = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
```

code_codestyle: 667
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : List[Any] = 0 @slow def _a (self ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) def _a (self ): A_ : str = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def _a (self ): A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) # Check that tokenizer_type ≠ model_type A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) @require_tokenizers def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , 
tokenizer_type="""bert""" ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" ) self.assertIsInstance(lowercase , lowercase ) def _a (self ): with pytest.raises(lowercase ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) if isinstance(lowercase , lowercase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase ) else: self.assertEqual(tokenizer.do_lower_case , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def _a (self ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai A_ : List[str] = TOKENIZER_MAPPING.values() A_ : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(lowercase ) @require_tokenizers def _a (self ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase ) @require_tokenizers def _a (self ): A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase ) A_ : List[Any] = """Hello, world. 
How are you?""" A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase ) A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def _a (self ): A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(lowercase ) , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def _a (self ): A_ : Any = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(lowercase , lowercase ) def _a (self ): # Check we can load the tokenizer config of an online model. A_ : Tuple = get_tokenizer_config("""bert-base-cased""" ) A_ : Any = config.pop("""_commit_hash""" , lowercase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(lowercase , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. A_ : List[Any] = get_tokenizer_config(lowercase ) self.assertDictEqual(lowercase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. A_ : int = AutoTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Dict = get_tokenizer_config(lowercase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) A_ : Tuple = CustomTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) # Can register in two steps AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: A_ : str = BertTokenizerFast.from_pretrained(lowercase ) bert_tokenizer.save_pretrained(lowercase ) A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase ): A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase ): A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : str = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def _a (self ): class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = False class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = NewTokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = False try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # If remote code is not set, the default is to use local A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
A_ : int = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): A_ : Dict = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def _a (self ): with self.assertRaisesRegex( lowercase , """bert-base is not a local folder and is not a valid model identifier""" ): A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" ) def _a (self ): with self.assertRaisesRegex( lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" ) def _a (self ): # Make sure we have cached the tokenizer. A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
def validate_initial_digits(credit_card_number: str) -> bool:
    """Returns True if the credit card number starts with a valid issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Returns True if the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validates characters, length, prefix and Luhn checksum, printing the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
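# Illustrative cross-check (not part of the original file): an independent Luhn
# checksum using the equivalent "subtract 9" rule. Doubling a single digit never
# exceeds 18, so d % 10 + 1 == d - 9 for every doubled d > 9.
def luhn_checksum(number: str) -> int:
    digits = [int(d) for d in number]
    for i in range(len(digits) - 2, -1, -2):  # every second digit from the right
        doubled = digits[i] * 2
        digits[i] = doubled - 9 if doubled > 9 else doubled
    return sum(digits) % 10


assert luhn_checksum("4111111111111111") == 0  # classic valid Visa test number
assert luhn_checksum("4111111111111112") != 0  # corrupting one digit breaks it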
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # the last partition absorbs any remainder from the integer division
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
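# Usage sketch (added for illustration, not in the original file): 16647 bytes
# over 4 partitions gives 16647 // 4 == 4161 bytes each, with the remainder
# folded into the final range.
assert allocation_num(16647, 4) == ["1-4161", "4162-8322", "8323-12483", "12484-16647"]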
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Apply Malus's law: transmitted intensity is I0 * cos^2(angle)."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
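# Worked example (illustrative): for a polarizer at 60 degrees, cos(60°) = 0.5,
# so the transmitted intensity is 100.0 * 0.5**2 == 25.0.
assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9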
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Any = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) A_ : Union[str, Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : str = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Union[str, Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = dct.pop(lowerCamelCase__ ) A_ : Optional[int] = val def a ( lowerCamelCase__ ): '''simple docstring''' if "handwritten" in checkpoint_url: A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : 
Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ ) A_ : int = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : List[str] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Union[str, Any] = 10_24 A_ : List[Any] = 40_96 A_ : Dict = 24 A_ : List[str] = 16 A_ : Union[str, Any] = 10_24 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Optional[Any] = False A_ : Union[str, Any] = """relu""" A_ : List[str] = 10_24 A_ : Tuple = True A_ : Tuple = False A_ : List[str] = False # load HuggingFace model A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ ) A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ ) A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() # load state_dict of original model, rename some keys A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""] A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ ) if key.startswith("""decoder""" ) and "output_projection" not in key: A_ : str = val else: A_ : List[str] = val # load state dict model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image A_ : str = ViTImageProcessor(size=encoder_config.image_size ) A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" ) A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values # verify logits A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ) A_ : Dict = outputs.logits A_ : str = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : Any = torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: A_ : List[Any] = torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, 
-0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase :Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def a ( lowerCamelCase__ ): '''simple docstring''' if "cls_token" in name: A_ : Union[str, Any] = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: A_ : int = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: A_ : Optional[Any] = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: A_ : Optional[int] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: A_ : List[Any] = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: A_ : List[str] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: A_ : int = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: A_ : Dict = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: A_ : Any = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: A_ : Dict = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: A_ : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: A_ : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: A_ : Any = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: A_ : Optional[int] = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: A_ : Any = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: A_ : Dict = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: A_ : List[str] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: A_ : Tuple = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: A_ : Tuple = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A_ : Tuple = orig_state_dict.pop(lowerCamelCase__ ) if "qkv" in key: A_ : List[Any] = key.split(""".""" ) A_ : Dict = int(key_split[1] ) if "decoder_blocks" in key: A_ : Optional[int] = config.decoder_hidden_size A_ : int = """decoder.decoder_layers.""" if "weight" in key: A_ : Tuple = val[:dim, :] A_ : List[str] = val[dim : dim * 2, :] A_ : int = val[-dim:, :] elif "bias" in key: A_ : List[str] = val[:dim] A_ : str = val[dim : dim * 2] A_ : List[str] = val[-dim:] else: A_ : List[str] = config.hidden_size A_ : Union[str, Any] = """vit.encoder.layer.""" if "weight" in key: A_ : Optional[Any] = val[:dim, :] A_ : int = val[dim : dim * 2, :] A_ : Optional[Any] = val[-dim:, :] elif "bias" in key: A_ : Dict = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : List[str] = val[-dim:] else: A_ : Union[str, Any] = val return orig_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = ViTMAEConfig() if "large" in checkpoint_url: A_ : Optional[Any] = 10_24 A_ : List[str] = 40_96 A_ : str = 24 A_ : Union[str, Any] = 16 elif "huge" in checkpoint_url: A_ : str = 14 A_ : List[Any] = 12_80 A_ : Any = 51_20 A_ : List[str] = 32 A_ : List[Any] 
= 16 A_ : List[Any] = ViTMAEForPreTraining(lowerCamelCase__ ) A_ : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" )["""model"""] A_ : Union[str, Any] = ViTMAEImageProcessor(size=config.image_size ) A_ : Optional[Any] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() A_ : Optional[int] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) A_ : int = ViTMAEImageProcessor(size=config.image_size ) A_ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) A_ : Dict = model(**lowerCamelCase__ ) A_ : List[Any] = outputs.logits if "large" in checkpoint_url: A_ : List[str] = torch.tensor( [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] ) elif "huge" in checkpoint_url: A_ : Optional[int] = torch.tensor( [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] ) else: A_ : str = torch.tensor( [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCamelCase :List[str] = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit, as originally implemented in the Google BERT repo."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of the GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)  # sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Clip the range of possible GELU outputs to [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split x in two along `axis` and gate one half with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
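# Usage sketch (illustrative, not in the original file): activations are looked
# up by name; "gelu_10" additionally clips outputs to [-10, 10], and an unknown
# name raises a KeyError listing the supported keys.
act = get_tf_activation("gelu")
print(act(tf.constant([-1.0, 0.0, 1.0])))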
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY) def a ( ): '''simple docstring''' A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ ) # assert negative_img array for at least one True assert negative_img.any() def a ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def a ( ): '''simple docstring''' A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a ( ): '''simple docstring''' A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() A_ : List[Any] = canny.canny(lowerCamelCase__ ) # assert canny array for at least one True assert canny_array.any() def a ( ): '''simple docstring''' assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all() def a ( ): '''simple docstring''' A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ ) assert res.any() def a ( ): '''simple docstring''' assert med.median_filter(lowerCamelCase__ , 3 ).any() def a ( ): '''simple docstring''' A_, A_ : int = sob.sobel_filter(lowerCamelCase__ ) assert grad.any() and theta.any() def a ( ): '''simple docstring''' A_ : int = sp.make_sepia(lowerCamelCase__ , 20 ) assert sepia.all() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def a ( ): '''simple docstring''' A_ : int = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 ) # Test for get_neighbors_pixel function() return not None A_ : str = 0 A_ : str = 0 A_ : Dict = image[x_coordinate][y_coordinate] A_ : Optional[Any] = lbp.get_neighbors_pixel( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image A_ : str = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert lbp_image.any()
'''simple docstring''' import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowerCamelCase :int = logging.get_logger(__name__) class _lowerCAmelCase : def __init__(self , lowercase , lowercase ): A_ : Optional[int] = question_encoder A_ : List[Any] = generator A_ : Tuple = self.question_encoder def _a (self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(lowercase , exist_ok=lowercase ) A_ : Tuple = os.path.join(lowercase , """question_encoder_tokenizer""" ) A_ : Union[str, Any] = os.path.join(lowercase , """generator_tokenizer""" ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def _a (cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer A_ : List[str] = kwargs.pop("""config""" , lowercase ) if config is None: A_ : Optional[int] = RagConfig.from_pretrained(lowercase ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) A_ : List[str] = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__(self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def _a (self ): A_ : List[Any] = self.question_encoder def _a (self ): A_ : Optional[Any] = self.generator def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , lowercase , ) if max_length is None: A_ : str = self.current_tokenizer.model_max_length A_ : List[str] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: A_ : str = self.current_tokenizer.model_max_length A_ : Optional[Any] = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) A_ : List[Any] = labels["""input_ids"""] return model_inputs
'''simple docstring''' from importlib import import_module from .logging import get_logger lowerCamelCase :Dict = get_logger(__name__) class _lowerCAmelCase : def __init__(self , lowercase , lowercase=None ): A_ : Optional[int] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , lowercase , getattr(lowercase , lowercase ) ) A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Dict = [] def __init__(self , lowercase , lowercase , lowercase , lowercase=None ): A_ : Union[str, Any] = obj A_ : Optional[int] = target A_ : Optional[Any] = new A_ : Optional[Any] = target.split(""".""" )[0] A_ : Tuple = {} A_ : Optional[int] = attrs or [] def __enter__(self ): *A_, A_ : Optional[Any] = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase ) ): try: A_ : Any = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): A_ : int = getattr(self.obj , lowercase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): A_ : str = obj_attr # patch at top level setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) ) A_ : Optional[Any] = getattr(self.obj , lowercase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) ) A_ : Dict = getattr(lowercase , lowercase ) # finally set the target attribute setattr(lowercase , lowercase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , lowercase ) is attr_value: A_ : Dict = getattr(self.obj , lowercase ) setattr(self.obj , lowercase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" A_ : int = globals()["""__builtins__"""][target_attr] setattr(self.obj , lowercase , self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__(self , *lowercase ): for attr in list(self.original ): setattr(self.obj , lowercase , self.original.pop(lowercase ) ) def _a (self ): self.__enter__() self._active_patches.append(self ) def _a (self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[Any] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Any = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ): super().__init__() self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase ) # create a imagenet -> id dictionary for easier use A_ : str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): A_ : Optional[Any] = int(lowercase ) A_ : List[Any] = dict(sorted(self.labels.items() ) ) def _a (self , lowercase ): if not isinstance(lowercase , lowercase ): A_ : Optional[int] = list(lowercase ) for l in label: if l not in self.labels: raise ValueError( F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ): A_ : Tuple = len(lowercase ) A_ : Optional[Any] = self.transformer.config.sample_size A_ : int = self.transformer.config.in_channels A_ : Optional[int] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , ) A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 ) A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device ) A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: A_ : List[Any] = latent_model_input[: len(lowercase ) // 2] A_ : List[str] = torch.cat([half, half] , dim=0 ) A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase ) A_ : Tuple = t if not torch.is_tensor(lowercase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) A_ : Optional[Any] = latent_model_input.device.type == """mps""" if isinstance(lowercase , lowercase ): A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa else: A_ : List[Any] = torch.intaa if is_mps else torch.intaa A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: A_ : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A_ : int = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output A_ : List[Any] = self.transformer( lowercase , timestep=lowercase , class_labels=lowercase ).sample # perform guidance if guidance_scale > 1: A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 ) A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) A_ : str = torch.cat([half_eps, half_eps] , dim=0 ) A_ : Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: A_, A_ : int = torch.split(lowercase , lowercase , dim=1 ) else: A_ : Optional[int] = noise_pred # compute previous image: x_t -> x_t-1 A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample if guidance_scale > 1: A_, A_ : int = latent_model_input.chunk(2 , dim=0 ) else: A_ : Union[str, Any] = latent_model_input A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents A_ : List[Any] = self.vae.decode(lowercase ).sample A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ : int = self.numpy_to_pil(lowercase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowercase )
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """
    Expected number of distinct colours among `taken` balls drawn from NUM_BALLS
    balls (NUM_COLOURS colours, BALLS_PER_COLOUR balls each), to 9 decimal places.
    """
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
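# Sanity check (illustrative, not in the original file): by linearity of
# expectation the answer is NUM_COLOURS * P(a given colour appears at least
# once), where P = 1 - C(60, 20) / C(70, 20) is one minus the chance that all
# 20 draws miss that colour's 10 balls.
expected = NUM_COLOURS * (1 - math.comb(NUM_BALLS - BALLS_PER_COLOUR, 20) / math.comb(NUM_BALLS, 20))
assert solution(20) == f"{expected:.9f}"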
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back into a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
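# Round-trip sketch (illustrative): decoding the encoded bytes recovers the input.
assert base64_decode(base64_encode("Hello World!")) == "Hello World!"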
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :List[Any] = logging.get_logger(__name__) lowerCamelCase :Union[str, Any] = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model' __SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ): A_ : Tuple = vocab_size A_ : str = hidden_size A_ : Optional[Any] = d_kv A_ : Tuple = d_ff A_ : str = num_layers A_ : int = num_heads A_ : Dict = relative_attention_num_buckets A_ : Optional[Any] = relative_attention_max_distance A_ : Dict = dropout_rate A_ : Optional[int] = layer_norm_epsilon A_ : Dict = initializer_factor A_ : Any = use_cache A_ : int = eos_token_id A_ : Tuple = decoder_start_token_id # for backwards compatibility A_ : str = dense_act_fn super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , ) @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model' def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ): super().__init__(**lowercase ) A_ : List[str] = hidden_size A_ : Optional[int] = patch_embed_hidden_size A_ : Any = d_ff A_ : str = dropout_rate A_ : Dict = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : List[Any] = initializer_range A_ : List[str] = initializer_factor A_ : Dict = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[Any] = dense_act_fn A_ : List[Any] = seq_len A_ : Tuple = relative_attention_num_buckets A_ : Any = relative_attention_max_distance A_ : int = d_kv @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : Tuple = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = 'pix2struct' __SCREAMING_SNAKE_CASE : List[Any] = True def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ): super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase ) if text_config is None: A_ : Optional[Any] = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) A_ : Tuple = PixaStructTextConfig(**lowercase ) A_ : List[str] = PixaStructVisionConfig(**lowercase ) A_ : Dict = self.text_config.decoder_start_token_id A_ : Union[str, Any] = self.text_config.pad_token_id A_ : str = self.text_config.eos_token_id A_ : List[str] = initializer_factor A_ : int = initializer_range A_ : Tuple = self.initializer_range A_ : Tuple = self.initializer_range A_ : List[str] = is_vqa @classmethod def _a (cls , lowercase , lowercase , **lowercase ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase ) def _a (self ): A_ : Optional[Any] = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : List[Any] = self.vision_config.to_dict() A_ : List[str] = self.__class__.model_type return output
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available

_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ ) if matches: A_ : Optional[Any] = float(matches[1] ) A_ : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". A_ : Optional[Any] = 10_01 A_ : Union[str, Any] = """imagenet-1k-id2label.json""" A_ : List[str] = """huggingface/label-files""" A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()} A_ : int = """background""" A_ : List[str] = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def a ( ): '''simple docstring''' A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ ) # Load 🤗 model A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor A_ : Any = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" ) A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: A_ : Any = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) A_ : Union[str, Any] = """google/""" + model_name image_processor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should be in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase :str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
667
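The conversion script above recovers the depth multiplier and input resolution from the checkpoint name with a regex, and rejects quantized variants. A quick standalone check of that parsing (the model names are illustrative):

import re

for name in ("mobilenet_v1_1.0_224", "mobilenet_v1_0.75_192", "mobilenet_v1_1.0_224_quant"):
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", name)
    if matches:
        depth_multiplier = float(matches[1])  # 1.0 or 0.75
        image_size = int(matches[2])          # 224 or 192
        print(name, "->", depth_multiplier, image_size)
    else:
        print(name, "-> no match (extra underscore, e.g. a quantized variant)")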
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer'] __SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor' __SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__(self , lowercase=None , lowercase=None , **lowercase ): A_ : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase , ) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase , lowercase ) def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) A_ : Dict = features["""words"""] A_ : Optional[int] = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) # add pixel values A_ : List[Any] = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] ) A_ : Optional[int] = images return encoded_inputs def _a (self , lowercase , lowercase ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image A_ : str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase ) != 
len(lowercase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" F' {len(lowercase )} and {len(lowercase )}' ) return images_with_overflow def _a (self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property def _a (self ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _a (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , ) return self.image_processor_class @property def _a (self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , ) return self.image_processor
667
1
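When truncation with return_overflowing_tokens splits one page into several token windows, the processor above re-attaches the right image to each window via overflow_to_sample_mapping. A list-based sketch of that expansion (function and variable names are my own):

def expand_images_for_overflow(images, overflow_to_sample_mapping):
    # Token window i came from images[overflow_to_sample_mapping[i]].
    return [images[sample_idx] for sample_idx in overflow_to_sample_mapping]


# First document split into three windows, second one fits in a single window:
print(expand_images_for_overflow(["img0", "img1"], [0, 0, 0, 1]))
# -> ['img0', 'img0', 'img0', 'img1']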
'''simple docstring''' import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCamelCase :Optional[int] = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = test_results.split(""" """ ) A_ : List[Any] = 0 A_ : str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. A_ : List[str] = expressions[-2] if """=""" in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCamelCase__ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = {} A_ : str = None A_ : List[str] = False for line in failures_short_lines.split("""\n""" ): if re.search(r"""_ \[doctest\]""" , lowerCamelCase__ ): A_ : List[Any] = True A_ : Optional[int] = line.split(""" """ )[2] elif in_error and not line.split(""" """ )[0].isdigit(): A_ : List[str] = line A_ : Any = False return failures class _lowerCAmelCase : def __init__(self , lowercase , lowercase ): A_ : List[str] = title A_ : Any = doc_test_results["""time_spent"""].split(""",""" )[0] A_ : Any = doc_test_results["""success"""] A_ : str = doc_test_results["""failures"""] A_ : Any = self.n_success + self.n_failures # Failures and success of the modeling tests A_ : Tuple = doc_test_results @property def _a (self ): A_ : List[str] = [self._time_spent] A_ : Optional[int] = 0 for time in time_spent: A_ : List[Any] = time.split(""":""" ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowercase ) == 1: A_ : Any = [0, 0, time_parts[0]] A_, A_, A_ : List[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds A_, A_, A_ : Dict = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F'{int(lowercase )}h{int(lowercase )}m{int(lowercase )}s' @property def _a (self ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _a (self ): return { "type": "section", "text": { "type": "plain_text", "text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def _a (self ): return { "type": "section", "text": { "type": "plain_text", "text": ( F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in' F' {self.time}.' 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def _a (self ): A_ : Union[str, Any] = 40 A_ : List[Any] = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(lowercase , lowercase )} A_ : Union[str, Any] = """""" for category, failures in category_failures.items(): if len(lowercase ) == 0: continue if report != "": report += "\n\n" report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(lowercase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'The following examples had failures:\n\n\n{report}\n', }, } @property def _a (self ): A_ : Optional[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(lowercase ) @staticmethod def _a (): A_ : Optional[Any] = [ { """type""": """section""", """text""": { """type""": """plain_text""", """text""": """There was an issue running the tests.""", }, """accessory""": { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True}, """url""": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } ] print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(lowercase )} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowercase , ) def _a (self ): print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(self.payload )} ) ) A_ : Any = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else """All tests passed.""" A_ : Optional[int] = client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=lowercase , ) def _a (self , lowercase , lowercase , lowercase , lowercase ): A_ : Tuple = """""" for key, value in failures.items(): A_ : List[Any] = value[:200] + """ [Truncated]""" if len(lowercase ) > 250 else value failures_text += F'*{key}*\n_{value}_\n\n' A_ : Dict = job_name A_ : str = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}} if job_link is not None: A_ : int = { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True}, """url""": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _a (self ): if self.thread_ts is None: raise ValueError("""Can only post reply if a post has been made.""" ) A_ : List[Any] = self.doc_test_results.pop("""job_link""" ) self.doc_test_results.pop("""failures""" ) self.doc_test_results.pop("""success""" ) self.doc_test_results.pop("""time_spent""" ) A_ : List[Any] = sorted(self.doc_test_results.items() , key=lambda lowercase : t[0] ) for job, job_result in sorted_dict: if len(job_result["""failures"""] ): A_ : List[Any] = F'*Num failures* :{len(job_result["failed"] )} \n' A_ : str = job_result["""failures"""] A_ : Any = self.get_reply_blocks(lowercase , lowercase , lowercase , 
text=lowercase ) print("""Sending the following reply""" ) print(json.dumps({"""blocks""": blocks} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'Results for {job}' , blocks=lowercase , thread_ts=self.thread_ts["""ts"""] , ) time.sleep(1 ) def a ( ): '''simple docstring''' A_ : Dict = os.environ["""GITHUB_RUN_ID"""] A_ : str = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100' A_ : Union[str, Any] = requests.get(lowerCamelCase__ ).json() A_ : Tuple = {} try: jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) A_ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(lowerCamelCase__ ): A_ : Optional[int] = requests.get(url + f'&page={i + 2}' ).json() jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return jobs except Exception as e: print("""Unknown error, could not fetch links.""" , lowerCamelCase__ ) return {} def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = {} if os.path.exists(lowerCamelCase__ ): A_ : Dict = os.listdir(lowerCamelCase__ ) for file in files: try: with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , encoding="""utf-8""" ) as f: A_ : str = f.read() except UnicodeDecodeError as e: raise ValueError(f'Could not open {os.path.join(lowerCamelCase__ , lowerCamelCase__ )}.' ) from e return _artifact def a ( ): '''simple docstring''' class _lowerCAmelCase : def __init__(self , lowercase ): A_ : List[Any] = name A_ : List[Any] = [] def __str__(self ): return self.name def _a (self , lowercase ): self.paths.append({"""name""": self.name, """path""": path} ) A_ : Dict[str, Artifact] = {} A_ : Tuple = filter(os.path.isdir , os.listdir() ) for directory in directories: A_ : Dict = directory if artifact_name not in _available_artifacts: A_ : List[str] = Artifact(lowerCamelCase__ ) _available_artifacts[artifact_name].add_path(lowerCamelCase__ ) return _available_artifacts if __name__ == "__main__": lowerCamelCase :Optional[Any] = get_job_links() lowerCamelCase :int = retrieve_available_artifacts() lowerCamelCase :int = collections.OrderedDict( [ ('''*.py''', '''API Examples'''), ('''*.md''', '''MD Examples'''), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCamelCase :Dict = { v: { '''failed''': [], '''failures''': {}, } for v in docs.values() } # Link to the GitHub Action job lowerCamelCase :Union[str, Any] = github_actions_job_links.get('''run_doctests''') lowerCamelCase :Dict = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0] lowerCamelCase :Optional[Any] = retrieve_artifact(artifact_path['''name''']) if "stats" in artifact: lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = handle_test_results(artifact['''stats''']) lowerCamelCase :List[str] = failed lowerCamelCase :List[str] = success lowerCamelCase :Union[str, Any] = time_spent[1:-1] + ''', ''' lowerCamelCase :Optional[int] = extract_first_line_failure(artifact['''failures_short''']) for line in artifact["summary_short"].split('''\n'''): if re.search('''FAILED''', line): lowerCamelCase :int = line.replace('''FAILED ''', '''''') lowerCamelCase :Dict = line.split()[0].replace('''\n''', '''''') if "::" in line: lowerCamelCase , lowerCamelCase :Optional[Any] = line.split('''::''') else: lowerCamelCase , lowerCamelCase :str = line, line for file_regex in docs.keys(): if 
fnmatch(file_path, file_regex): lowerCamelCase :List[Any] = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCamelCase :List[str] = all_failures[test] if test in all_failures else '''N/A''' lowerCamelCase :Optional[Any] = failure break lowerCamelCase :int = Message('''🤗 Results of the doc tests.''', doc_test_results) message.post() message.post_reply()
667
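The report class above folds per-suite durations such as "1:02:03" or bare ".45" seconds into one total. A standalone version of that accumulation, assuming inputs are either h:m:s triples or bare seconds:

def total_time(spans):
    total_secs = 0
    for span in spans:
        parts = span.split(":")
        if len(parts) == 1:              # bare seconds such as "45.5"
            parts = ["0", "0", parts[0]]
        hours, minutes, seconds = int(parts[0]), int(parts[1]), float(parts[2])
        total_secs += hours * 3600 + minutes * 60 + seconds
    h, m, s = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
    return f"{int(h)}h{int(m)}m{int(s)}s"


print(total_time(["1:02:03", "0:10:30", "45.5"]))  # 1h13m18s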
'''simple docstring''' from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCamelCase :Optional[int] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , **lowercase ): super().__init__(**lowercase ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) self.check_model_type(lowercase ) def _a (self , **lowercase ): A_ : str = {} A_ : Dict = {} A_ : str = {} # preprocess args if "points_per_batch" in kwargs: A_ : Dict = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: A_ : int = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: A_ : str = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: A_ : int = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: A_ : Any = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: A_ : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: A_ : Union[str, Any] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: A_ : List[str] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: A_ : List[Any] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ): return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase ) def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ): A_ : Tuple = load_image(lowercase ) A_ : int = self.image_processor.size["""longest_edge"""] A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": A_ : Optional[Any] = self.get_inference_context() with inference_context(): A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device ) A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) A_ : Tuple = image_embeddings A_ : Dict = grid_points.shape[1] A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0 , lowercase , lowercase ): A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :] A_ : List[Any] = input_labels[:, i : i + points_per_batch] A_ : Optional[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ): A_ : Any = model_inputs.pop("""input_boxes""" ) A_ : str = model_inputs.pop("""is_last""" ) A_ : int = model_inputs.pop("""original_sizes""" ).tolist() A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist() A_ : List[str] = self.model(**lowercase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks A_ : Optional[int] = model_outputs["""pred_masks"""] A_ : Tuple = self.image_processor.post_process_masks( lowercase , lowercase , lowercase , lowercase , binarize=lowercase ) A_ : Union[str, Any] = model_outputs["""iou_scores"""] A_, A_, A_ : Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ): A_ : Tuple = [] A_ : Optional[Any] = [] A_ : str = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) A_ : Any = torch.cat(lowercase ) A_ : List[Any] = torch.cat(lowercase ) A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation( lowercase , lowercase , lowercase , lowercase ) A_ : int = defaultdict(lowercase ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowercase ) A_ : Optional[int] = {} if output_rle_mask: A_ : List[str] = rle_mask if output_bboxes_mask: A_ : Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
667
1
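The preprocess step above feeds the point grid to the model in slices of points_per_batch so memory stays bounded. The chunking loop, reduced to plain lists (note the is_last flag only fires when the point count is a multiple of the batch size, as in this sketch):

def iter_point_batches(grid_points, points_per_batch):
    if points_per_batch <= 0:
        raise ValueError("points_per_batch must be >= 1")
    n_points = len(grid_points)
    for i in range(0, n_points, points_per_batch):
        is_last = i == n_points - points_per_batch
        yield grid_points[i : i + points_per_batch], is_last


for batch, is_last in iter_point_batches(list(range(12)), 4):
    print(batch, "<- last" if is_last else "")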
'''simple docstring''' import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : List[str] = """hf-internal-testing/tiny-random-t5""" A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase ) A_ : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) A_ : Dict = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) A_ : str = model.generate(**lowercase ) A_ : Union[str, Any] = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase ) A_ : int = AutoModelForSeqaSeqLM.from_pretrained(lowercase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) A_ : Optional[Any] = model_reloaded.generate(**lowercase ) self.assertTrue(torch.allclose(lowercase , lowercase ) ) def _a (self ): A_ : Optional[Any] = """hf-internal-testing/tiny-random-t5""" A_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(lowercase ) A_ : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(lowercase ): model.save_pretrained(lowercase ) A_ : int = model.reverse_bettertransformer() model.save_pretrained(lowercase )
667
'''simple docstring''' from collections.abc import Callable import numpy as np def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) ) A_ : int = np.zeros((n + 1,) ) A_ : List[str] = ya A_ : Any = xa for k in range(lowerCamelCase__ ): A_ : List[Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] ) A_ : Optional[int] = y[k] + ( (step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
667
1
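The ODE stepper above is Heun's method: an Euler predictor followed by a trapezoid corrector. A readable reconstruction of the update, checked against dy/dx = y with y(0) = 1, whose exact value at x = 1 is e (function and variable names here are my own):

import math

import numpy as np


def heun(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0] = y0
    x = x0
    for k in range(n):
        pred = y[k] + step_size * ode_func(x, y[k])            # Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, pred)  # trapezoid corrector
        )
        x += step_size
    return y


y = heun(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
print(y[-1], math.e)  # ~2.714 vs 2.71828..., second-order accurate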
'''simple docstring''' def a ( lowerCamelCase__ ): '''simple docstring''' if num < 0: return False A_ : int = num A_ : int = 0 while num > 0: A_ : Dict = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
667
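The helper above tests for palindromic integers by rebuilding the number from its digits in reverse, with no string conversion. The same loop, traced on two inputs:

def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    rev, n = 0, num
    while n > 0:
        rev = rev * 10 + n % 10  # append the last digit of n to rev
        n //= 10
    return rev == num


print(is_palindrome(12321), is_palindrome(123))  # True False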
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ ) if matches: A_ : Optional[Any] = float(matches[1] ) A_ : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". A_ : Optional[Any] = 10_01 A_ : Union[str, Any] = """imagenet-1k-id2label.json""" A_ : List[str] = """huggingface/label-files""" A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()} A_ : int = """background""" A_ : List[str] = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def a ( ): '''simple docstring''' A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ ) # Load 🤗 model A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor A_ : Any = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" ) A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: A_ : Any = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) A_ : Union[str, Any] = """google/""" + model_name image_processor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should be in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase :str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
667
1
'''simple docstring''' def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) A_ : Optional[Any] = str(bin(lowerCamelCase__ ) ) binary_number += "0" * shift_amount return binary_number def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) A_ : List[str] = str(bin(lowerCamelCase__ ) )[2:] if shift_amount >= len(lowerCamelCase__ ): return "0b0" A_ : Any = binary_number[: len(lowerCamelCase__ ) - shift_amount] return "0b" + shifted_binary_number def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if number >= 0: # Get binary representation of positive number A_ : str = """0""" + str(bin(lowerCamelCase__ ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number A_ : Optional[Any] = len(bin(lowerCamelCase__ )[3:] ) # Find 2's complement of number A_ : Any = bin(abs(lowerCamelCase__ ) - (1 << binary_number_length) )[3:] A_ : Dict = ( """1""" + """0""" * (binary_number_length - len(lowerCamelCase__ )) + binary_number ) if shift_amount >= len(lowerCamelCase__ ): return "0b" + binary_number[0] * len(lowerCamelCase__ ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(lowerCamelCase__ ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
667
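The third helper above emulates an arithmetic right shift by replicating the sign bit of a two's-complement bit pattern. A compact sketch of that rule at a fixed width (the helper name and the 8-bit default are mine):

def arithmetic_right_shift(number: int, shift_amount: int, width: int = 8) -> str:
    # Render number in width-bit two's complement, then shift while
    # replicating the sign bit on the left.
    bits = format(number & (2**width - 1), f"0{width}b")
    return "0b" + bits[0] * shift_amount + bits[: width - shift_amount]


print(arithmetic_right_shift(-13, 2))  # 0b11111100, i.e. -4 in 8-bit two's complement
print(-13 >> 2)                        # -4: Python's built-in >> is already arithmetic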
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer' __SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer'] __SCREAMING_SNAKE_CASE : Tuple = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__(self , lowercase , lowercase=None ): super().__init__(lowercase ) A_ : Any = speaker_embeddings @classmethod def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: A_ : Any = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) A_ : str = None else: with open(lowercase ) as speaker_embeddings_json: A_ : List[str] = json.load(lowercase ) else: A_ : str = None A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase ) A_ : Optional[int] = {} A_ : Tuple = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": A_ : Union[str, Any] = self._load_voice_preset(lowercase ) A_ : Tuple = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , ) A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' ) A_ : str = tmp_dict with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def _a (self , lowercase = None , **lowercase ): A_ : List[Any] = self.speaker_embeddings[voice_preset] A_ : Optional[Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) A_ : int = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) A_ : Tuple = np.load(lowercase ) return voice_preset_dict def _a (self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): A_ : Optional[int] = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ): A_ : Optional[int] = voice_preset + """.npz""" A_ : Any = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase ) A_ : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: A_ : Union[str, Any] = voice_preset return encoded_text
667
1
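The processor above insists that each voice-preset array has a fixed rank: 1-D for the semantic prompt, 2-D for the coarse and fine prompts. A standalone validator in the same spirit (a sketch, not the library's code):

import numpy as np

PRESET_RANK = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}


def validate_voice_preset(preset):
    for key, rank in PRESET_RANK.items():
        if key not in preset:
            raise ValueError(f"voice preset is missing {key}")
        if not isinstance(preset[key], np.ndarray) or preset[key].ndim != rank:
            raise ValueError(f"{key} must be a {rank}D ndarray")


validate_voice_preset({
    "semantic_prompt": np.zeros(10),
    "coarse_prompt": np.zeros((2, 10)),
    "fine_prompt": np.zeros((2, 10)),
})
print("preset ok")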
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCamelCase :Any = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = IMAGENET_DEFAULT_MEAN , lowercase = IMAGENET_DEFAULT_STD , **lowercase , ): super().__init__(**lowercase ) A_ : Dict = size if size is not None else {"""shortest_edge""": 224} A_ : Tuple = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : List[str] = get_size_dict(lowercase , param_name="""crop_size""" ) A_ : Optional[Any] = do_resize A_ : List[Any] = size A_ : Optional[Any] = resample A_ : Union[str, Any] = do_center_crop A_ : Tuple = crop_size A_ : int = do_rescale A_ : List[str] = rescale_factor A_ : Union[str, Any] = do_normalize A_ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN A_ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : str = get_size_dict(lowercase , default_to_square=lowercase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: A_ : Union[str, Any] = int((256 / 224) * size["""shortest_edge"""] ) A_ : List[Any] = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase ) A_ : Optional[Any] = {"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' ) return resize( lowercase , size=(size_dict["""height"""], size_dict["""width"""]) , resample=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): A_ : Dict = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : int = do_resize if do_resize is not None else self.do_resize A_ : Union[str, Any] = resample if resample is not None else self.resample A_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : str = image_mean if image_mean is not None else self.image_mean A_ : Dict = image_std if image_std is not None else self.image_std A_ : Optional[int] = size if size is not None else self.size A_ : Tuple = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(lowercase , param_name="""crop_size""" ) A_ : Tuple = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. A_ : Optional[int] = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : List[Any] = [self.resize(lowercase , lowercase , lowercase ) for image in images] if do_center_crop: A_ : List[Any] = [self.center_crop(lowercase , lowercase ) for image in images] if do_rescale: A_ : int = [self.rescale(lowercase , lowercase ) for image in images] if do_normalize: A_ : Dict = [self.normalize(lowercase , lowercase , lowercase ) for image in images] A_ : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=lowercase , tensor_type=lowercase )
667
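The resize step above scales the requested crop edge by 256/224 before center-cropping, the standard "resize the short side, then crop" recipe. The arithmetic, checked on a 480x640 input (sizes are illustrative):

shortest_edge = 224                                # the crop size the model expects
resize_target = int((256 / 224) * shortest_edge)   # resize the short side to 256 first
height, width = 480, 640
scale = resize_target / min(height, width)
print(resize_target, (round(height * scale), round(width * scale)))
# -> 256 (256, 341): aspect ratio kept, then a 224x224 center crop follows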
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Union[str, Any] = tempfile.mkdtemp() A_ : List[Any] = BlipImageProcessor() A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase ) processor.save_pretrained(self.tmpdirname ) def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer def _a (self ): shutil.rmtree(self.tmpdirname ) def _a (self ): A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a (self ): A_ : str = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 ) A_ : str = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase ) self.assertIsInstance(processor.qformer_tokenizer , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : List[str] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = self.prepare_image_inputs() A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" ) A_ : Dict = processor(images=lowercase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a (self ): A_ : List[Any] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : Any = self.get_qformer_tokenizer() A_ : List[str] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : str = """lower newer""" A_ : List[Any] = processor(text=lowercase ) A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase ) A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , 
encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def _a (self ): A_ : int = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Optional[int] = """lower newer""" A_ : Optional[int] = self.prepare_image_inputs() A_ : Tuple = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(lowercase ): processor() def _a (self ): A_ : Dict = self.get_image_processor() A_ : str = self.get_tokenizer() A_ : Optional[int] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : Optional[int] = processor.batch_decode(lowercase ) A_ : Dict = tokenizer.batch_decode(lowercase ) self.assertListEqual(lowercase , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Dict = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = """lower newer""" A_ : Optional[Any] = self.prepare_image_inputs() A_ : Any = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
667
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[Any] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Any = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :Optional[Any] = logging.get_logger(__name__) lowerCamelCase :Tuple = { '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''', } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str' def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ): super().__init__(**lowercase ) A_ : int = image_size A_ : List[str] = patch_size A_ : Tuple = num_channels A_ : List[str] = max_token_length A_ : int = num_character_labels A_ : str = num_bpe_labels A_ : Tuple = num_wordpiece_labels A_ : Optional[int] = hidden_size A_ : List[Any] = num_hidden_layers A_ : int = num_attention_heads A_ : Tuple = mlp_ratio A_ : str = distilled A_ : Union[str, Any] = layer_norm_eps A_ : str = drop_rate A_ : int = qkv_bias A_ : Dict = attn_drop_rate A_ : List[Any] = drop_path_rate A_ : Any = output_aa_attentions A_ : Union[str, Any] = initializer_range
667
1
'''simple docstring''' import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__(self , lowercase , lowercase=100 , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=3 , ): A_ : Tuple = parent A_ : List[Any] = vocab_size A_ : Optional[Any] = batch_size A_ : List[str] = image_size A_ : Union[str, Any] = patch_size A_ : Tuple = num_channels A_ : int = is_training A_ : Optional[Any] = use_labels A_ : List[str] = hidden_size A_ : Any = num_hidden_layers A_ : int = num_attention_heads A_ : List[Any] = intermediate_size A_ : Tuple = hidden_act A_ : Tuple = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Dict = type_sequence_label_size A_ : Optional[int] = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Any = (image_size // patch_size) ** 2 A_ : Union[str, Any] = num_patches + 1 def _a (self ): A_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def _a (self , lowercase , lowercase , lowercase ): A_ : Union[str, Any] = FlaxBeitModel(config=lowercase ) A_ : Dict = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a (self , lowercase , lowercase , lowercase ): A_ : Tuple = FlaxBeitForMaskedImageModeling(config=lowercase ) A_ : Tuple = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _a (self , lowercase , lowercase , lowercase ): A_ : Optional[Any] = self.type_sequence_label_size A_ : str = FlaxBeitForImageClassification(config=lowercase ) A_ : Dict = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : int = 1 A_ : List[Any] = FlaxBeitForImageClassification(lowercase ) A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : str = model(lowercase ) def _a (self ): A_ : Union[str, Any] = 
self.prepare_config_and_inputs() ( ( A_ ), ( A_ ), ( A_ ), ) : List[str] = config_and_inputs A_ : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[str] = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _a (self ): A_ : List[Any] = FlaxBeitModelTester(self ) A_ : Any = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_, A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(lowercase ) A_ : Any = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : str = [*signature.parameters.keys()] A_ : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase ) def _a (self ): A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ : List[str] = self._prepare_for_class(lowercase , lowercase ) A_ : List[Any] = model_class(lowercase ) @jax.jit def model_jitted(lowercase , **lowercase ): return model(pixel_values=lowercase , **lowercase ) with self.subTest("""JIT Enabled""" ): A_ : Dict = model_jitted(**lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A_ : Optional[int] = model_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def _a (self ): A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def _a (self ): A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase ) def _a (self ): A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def _a (self ): for model_class_name in self.all_model_classes: A_ : Dict = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" ) A_ : str = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowercase ) def a ( ): '''simple docstring''' A_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): @cached_property def _a (self ): return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def _a (self ): A_ : Optional[int] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ) A_ : Any = self.default_image_processor A_ : Any = prepare_img() A_ : Optional[Any] = image_processor(images=lowercase , return_tensors="""np""" ).pixel_values # prepare bool_masked_pos A_ : Optional[int] = np.ones((1, 196) , dtype=lowercase ) # forward pass A_ : Optional[Any] = model(pixel_values=lowercase , bool_masked_pos=lowercase ) A_ : int = outputs.logits # verify the logits A_ : List[str] = (1, 196, 8192) self.assertEqual(logits.shape , lowercase ) A_ : Optional[int] = np.array( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ) 
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , lowercase , atol=1E-2 ) ) @slow def _a (self ): A_ : Tuple = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ) A_ : str = self.default_image_processor A_ : str = prepare_img() A_ : Any = image_processor(images=lowercase , return_tensors="""np""" ) # forward pass A_ : Any = model(**lowercase ) A_ : Optional[int] = outputs.logits # verify the logits A_ : Union[str, Any] = (1, 1000) self.assertEqual(logits.shape , lowercase ) A_ : Dict = np.array([-1.23_85, -1.09_87, -1.01_08] ) self.assertTrue(np.allclose(logits[0, :3] , lowercase , atol=1E-4 ) ) A_ : Optional[Any] = 281 self.assertEqual(logits.argmax(-1 ).item() , lowercase ) @slow def _a (self ): A_ : Dict = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ) A_ : Any = self.default_image_processor A_ : Tuple = prepare_img() A_ : str = image_processor(images=lowercase , return_tensors="""np""" ) # forward pass A_ : str = model(**lowercase ) A_ : str = outputs.logits # verify the logits A_ : int = (1, 21841) self.assertEqual(logits.shape , lowercase ) A_ : Union[str, Any] = np.array([1.68_81, -0.27_87, 0.59_01] ) self.assertTrue(np.allclose(logits[0, :3] , lowercase , atol=1E-4 ) ) A_ : str = 2396 self.assertEqual(logits.argmax(-1 ).item() , lowercase )
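For reference, the jitted-vs-eager comparison exercised above is the standard Flax pattern for checking that a module traces cleanly. A minimal self-contained sketch of the same idea, with a toy function standing in for the BeiT module (the toy_model function and its shapes are illustrative assumptions, not part of the test suite):

import jax
import jax.numpy as jnp


def toy_model(pixel_values):
    # Stand-in for a Flax module's __call__: any pure function of arrays traces.
    return jnp.tanh(pixel_values).mean(axis=(1, 2, 3))


x = jnp.ones((1, 3, 8, 8))
jitted = jax.jit(toy_model)

with jax.disable_jit():
    eager_out = toy_model(x)  # runs op-by-op, easier to debug

jit_out = jitted(x)  # compiled by XLA on the first call

assert eager_out.shape == jit_out.shape == (1,)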
667
'''Root finding for a real-valued function via the secant method.'''
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant update: approximate the derivative with a finite difference.
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
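As a quick sanity check of the routine above, a hypothetical second function with a known root, assuming the intersection function just defined (g is an invented example, not part of the original module):

def g(x: float) -> float:
    # Root at x = 2, since 2**2 - 4 == 0.
    return x * x - 4


root = intersection(g, 1.0, 3.0)
assert abs(root - 2.0) < 1e-4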
667
1
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : jnp.ndarray __SCREAMING_SNAKE_CASE : jnp.ndarray class _lowerCAmelCase ( nn.Module ): __SCREAMING_SNAKE_CASE : int __SCREAMING_SNAKE_CASE : Tuple[int] = (16, 32, 96, 256) __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa def _a (self ): A_ : List[Any] = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) A_ : List[str] = [] for i in range(len(self.block_out_channels ) - 1 ): A_ : Dict = self.block_out_channels[i] A_ : Tuple = self.block_out_channels[i + 1] A_ : Tuple = nn.Conv( lowercase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(lowercase ) A_ : Dict = nn.Conv( lowercase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(lowercase ) A_ : Any = blocks A_ : Union[str, Any] = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__(self , lowercase ): A_ : Tuple = self.conv_in(lowercase ) A_ : str = nn.silu(lowercase ) for block in self.blocks: A_ : int = block(lowercase ) A_ : Union[str, Any] = nn.silu(lowercase ) A_ : Optional[int] = self.conv_out(lowercase ) return embedding @flax_register_to_config class _lowerCAmelCase ( nn.Module , __UpperCAmelCase , __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False __SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1_280, 1_280) __SCREAMING_SNAKE_CASE : int = 2 __SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 __SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None __SCREAMING_SNAKE_CASE : int = 1_280 __SCREAMING_SNAKE_CASE : float = 0.0 __SCREAMING_SNAKE_CASE : bool = False __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa __SCREAMING_SNAKE_CASE : bool = True __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : str = "rgb" __SCREAMING_SNAKE_CASE : Tuple[int] = (16, 32, 96, 256) def _a (self , lowercase ): # init input tensors A_ : List[str] = (1, self.in_channels, self.sample_size, self.sample_size) A_ : Dict = jnp.zeros(lowercase , dtype=jnp.floataa ) A_ : str = jnp.ones((1,) , dtype=jnp.intaa ) A_ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) A_ : Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8) A_ : int = jnp.zeros(lowercase , dtype=jnp.floataa ) A_, A_ : Union[str, Any] = jax.random.split(lowercase ) A_ : Any = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(lowercase , lowercase , lowercase , lowercase , lowercase )["params"] def _a (self ): A_ : Union[str, Any] = self.block_out_channels A_ : List[Any] = block_out_channels[0] * 4 # If `num_attention_heads` is 
not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. A_ : int = self.num_attention_heads or self.attention_head_dim # input A_ : Union[str, Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time A_ : Union[str, Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) A_ : Optional[int] = FlaxTimestepEmbedding(lowercase , dtype=self.dtype ) A_ : str = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) A_ : Optional[int] = self.only_cross_attention if isinstance(lowercase , lowercase ): A_ : List[Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase , lowercase ): A_ : str = (num_attention_heads,) * len(self.down_block_types ) # down A_ : Union[str, Any] = [] A_ : Optional[Any] = [] A_ : Optional[int] = block_out_channels[0] A_ : Any = nn.Conv( lowercase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowercase ) for i, down_block_type in enumerate(self.down_block_types ): A_ : int = output_channel A_ : Union[str, Any] = block_out_channels[i] A_ : List[str] = i == len(lowercase ) - 1 if down_block_type == "CrossAttnDownBlock2D": A_ : Tuple = FlaxCrossAttnDownBlockaD( in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: A_ : Tuple = FlaxDownBlockaD( in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowercase ) for _ in range(self.layers_per_block ): A_ : int = nn.Conv( lowercase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowercase ) if not is_final_block: A_ : int = nn.Conv( lowercase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(lowercase ) A_ : Union[str, Any] = down_blocks A_ : Any = controlnet_down_blocks # mid A_ : Optional[int] = block_out_channels[-1] A_ : Union[str, Any] = FlaxUNetMidBlockaDCrossAttn( in_channels=lowercase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) A_ : Tuple = nn.Conv( lowercase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , 
dtype=self.dtype , ) def __call__(self , lowercase , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = True , lowercase = False , ): A_ : str = self.controlnet_conditioning_channel_order if channel_order == "bgr": A_ : List[str] = jnp.flip(lowercase , axis=1 ) # 1. time if not isinstance(lowercase , jnp.ndarray ): A_ : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0: A_ : str = timesteps.astype(dtype=jnp.floataa ) A_ : Tuple = jnp.expand_dims(lowercase , 0 ) A_ : List[str] = self.time_proj(lowercase ) A_ : Dict = self.time_embedding(lowercase ) # 2. pre-process A_ : int = jnp.transpose(lowercase , (0, 2, 3, 1) ) A_ : str = self.conv_in(lowercase ) A_ : Any = jnp.transpose(lowercase , (0, 2, 3, 1) ) A_ : str = self.controlnet_cond_embedding(lowercase ) sample += controlnet_cond # 3. down A_ : Tuple = (sample,) for down_block in self.down_blocks: if isinstance(lowercase , lowercase ): A_, A_ : str = down_block(lowercase , lowercase , lowercase , deterministic=not train ) else: A_, A_ : Union[str, Any] = down_block(lowercase , lowercase , deterministic=not train ) down_block_res_samples += res_samples # 4. mid A_ : List[str] = self.mid_block(lowercase , lowercase , lowercase , deterministic=not train ) # 5. contronet blocks A_ : Dict = () for down_block_res_sample, controlnet_block in zip(lowercase , self.controlnet_down_blocks ): A_ : Optional[int] = controlnet_block(lowercase ) controlnet_down_block_res_samples += (down_block_res_sample,) A_ : str = controlnet_down_block_res_samples A_ : List[str] = self.controlnet_mid_block(lowercase ) # 6. scaling A_ : Tuple = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=lowercase , mid_block_res_sample=lowercase )
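The init_weights method above follows the usual Flax recipe: build dummy inputs with the right shapes, split the PRNG key into named streams, and call the module's init. A minimal sketch of that recipe on a toy module (the ToyBlock class and its shapes are assumptions for illustration):

import jax
import jax.numpy as jnp
import flax.linen as nn


class ToyBlock(nn.Module):
    features: int = 16

    @nn.compact
    def __call__(self, x):
        return nn.Dense(self.features)(x)


params_rng, dropout_rng = jax.random.split(jax.random.PRNGKey(0))
rngs = {"params": params_rng, "dropout": dropout_rng}

sample = jnp.zeros((1, 8), dtype=jnp.float32)
params = ToyBlock().init(rngs, sample)["params"]  # FrozenDict of initialized weights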
667
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase :Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ): super().__init__(**lowercase ) A_ : Dict = size if size is not None else {"""shortest_edge""": 224} A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" ) A_ : str = do_resize A_ : str = size A_ : List[str] = resample A_ : Any = do_center_crop A_ : Union[str, Any] = crop_size A_ : List[Any] = do_rescale A_ : List[Any] = rescale_factor A_ : Dict = do_normalize A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Union[str, Any] = do_convert_rgb def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : List[str] = do_resize if do_resize is not None else self.do_resize A_ : int = size if size is not None else self.size A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase ) A_ : int = resample if resample is not None else self.resample A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase ) A_ : str = do_rescale if do_rescale is not None else self.do_rescale A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : Any = image_mean if image_mean is not None else self.image_mean A_ : Any = image_std if image_std is not None else self.image_std A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : List[str] = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : int = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A_ : int = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase , tensor_type=lowercase )
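The preprocess method above applies its steps in a fixed order: resize to the shortest edge, center-crop, rescale to [0, 1], then per-channel normalize, then reorder channels. The numpy sketch below walks the rescale/normalize/layout steps on one image, skipping resize and crop for brevity (shapes are illustrative; the real class delegates to the shared image_transforms helpers):

import numpy as np

image = np.random.randint(0, 256, size=(256, 320, 3), dtype=np.uint8)

# rescale: uint8 [0, 255] -> float [0, 1]
scaled = image.astype(np.float32) * (1 / 255)

# normalize per channel with the OpenAI CLIP statistics used as defaults above
mean = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
std = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)
normalized = (scaled - mean) / std

# channels-last -> channels-first, i.e. ChannelDimension.FIRST
pixel_values = normalized.transpose(2, 0, 1)
assert pixel_values.shape == (3, 256, 320)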
667
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Optional[int] = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" A_ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: A_ : Tuple = """""" else: A_ : Optional[int] = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) A_ : int = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A_ : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] A_ : Union[str, Any] = in_proj_bias[: config.hidden_size] A_ : int = 
in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] A_ : Any = in_proj_bias[-config.hidden_size :] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = dct.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def a ( ): '''simple docstring''' A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Any = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Tuple = DeiTConfig() # all deit models have fine-tuned heads A_ : Dict = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size A_ : int = 10_00 A_ : Optional[Any] = """huggingface/label-files""" A_ : Any = """imagenet-1k-id2label.json""" A_ : int = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) A_ : List[str] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} A_ : List[Any] = idalabel A_ : Dict = {v: k for k, v in idalabel.items()} A_ : List[str] = int(deit_name[-6:-4] ) A_ : str = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): A_ : int = 1_92 A_ : Dict = 7_68 A_ : str = 12 A_ : Dict = 3 elif deit_name[9:].startswith("""small""" ): A_ : int = 3_84 A_ : List[str] = 15_36 A_ : Tuple = 12 A_ : List[str] = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): A_ : Dict = 10_24 A_ : Union[str, Any] = 40_96 A_ : int = 24 A_ : Any = 16 # load original model from timm A_ : Union[str, Any] = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ : Optional[Any] = timm_model.state_dict() A_ : str = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # load HuggingFace model A_ : Dict = DeiTForImageClassificationWithTeacher(lowerCamelCase__ ).eval() model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image, prepared by DeiTImageProcessor A_ : Union[str, Any] = int( (2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 A_ : List[Any] = DeiTImageProcessor(size=lowerCamelCase__ , crop_size=config.image_size ) A_ : Optional[int] = image_processor(images=prepare_img() , return_tensors="""pt""" ) A_ : Optional[Any] = encoding["""pixel_values"""] A_ : Dict = model(lowerCamelCase__ ) A_ : int = timm_model(lowerCamelCase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCamelCase__ , outputs.logits , atol=1E-3 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--deit_name''', default='''vit_deit_base_distilled_patch16_224''', type=str, help='''Name of the DeiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCamelCase :Tuple = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
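read_in_q_k_v above relies on timm storing the attention input projection as a single fused matrix of shape (3 * hidden_size, hidden_size), which the HF checkpoint wants split into separate query/key/value weights. A small sketch of that split on a synthetic tensor:

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

# query, key and value occupy consecutive row blocks of the fused matrix
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]

q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : hidden_size * 2]
v_b = in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)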
667
'''Greedy selection of whole items by a caller-chosen key under a weight budget.'''


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort by the caller-supplied key (e.g. value-to-weight ratio), best first,
    # then take items greedily while they still fit under max_cost.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
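A small worked run of the helpers above (the menu data is invented for illustration):

names = ["Burger", "Pizza", "Coca Cola"]
values = [80, 100, 60]
weights = [40, 60, 10]

menu = build_menu(names, values, weights)
# Greedily pick by value-to-weight ratio under a weight budget of 60:
# Coca Cola (ratio 6.0) and Burger (ratio 2.0) fit; Pizza no longer does.
chosen, total_value = greedy(menu, 60, Things.value_weight)
print(chosen, total_value)  # total_value == 140.0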
667
1
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = KandinskyVaaPriorPipeline __SCREAMING_SNAKE_CASE : Dict = ['prompt'] __SCREAMING_SNAKE_CASE : Union[str, Any] = ['prompt', 'negative_prompt'] __SCREAMING_SNAKE_CASE : Any = [ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : Any = False @property def _a (self ): return 32 @property def _a (self ): return 32 @property def _a (self ): return self.time_input_dim @property def _a (self ): return self.time_input_dim * 4 @property def _a (self ): return 100 @property def _a (self ): A_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def _a (self ): torch.manual_seed(0 ) A_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowercase ) @property def _a (self ): torch.manual_seed(0 ) A_ : Dict = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } A_ : List[str] = PriorTransformer(**lowercase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 A_ : Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def _a (self ): torch.manual_seed(0 ) A_ : Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) A_ : Union[str, Any] = CLIPVisionModelWithProjection(lowercase ) return model @property def _a (self ): A_ : Any = CLIPImageProcessor( crop_size=224 , do_center_crop=lowercase , do_normalize=lowercase , do_resize=lowercase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor def _a (self ): A_ : Tuple = self.dummy_prior A_ : Dict = self.dummy_image_encoder A_ : List[Any] = self.dummy_text_encoder A_ : int = self.dummy_tokenizer A_ : Any = self.dummy_image_processor A_ : Union[str, Any] = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=lowercase , clip_sample_range=10.0 , ) A_ : str = { """prior""": prior, """image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def _a (self , lowercase , lowercase=0 ): if 
str(lowercase ).startswith("""mps""" ): A_ : Any = torch.manual_seed(lowercase ) else: A_ : List[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : Any = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def _a (self ): A_ : str = """cpu""" A_ : List[str] = self.get_dummy_components() A_ : List[Any] = self.pipeline_class(**lowercase ) A_ : Tuple = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : str = pipe(**self.get_dummy_inputs(lowercase ) ) A_ : Tuple = output.image_embeds A_ : List[str] = pipe( **self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0] A_ : Union[str, Any] = image[0, -10:] A_ : Optional[Any] = image_from_tuple[0, -10:] assert image.shape == (1, 32) A_ : List[str] = np.array( [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _a (self ): A_ : Optional[int] = torch_device == """cpu""" A_ : Union[str, Any] = True A_ : str = False self._test_inference_batch_single_identical( test_max_difference=lowercase , relax_max_difference=lowercase , test_mean_pixel_difference=lowercase , ) @skip_mps def _a (self ): A_ : List[str] = torch_device == """cpu""" A_ : Tuple = False self._test_attention_slicing_forward_pass( test_max_difference=lowercase , test_mean_pixel_difference=lowercase , )
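get_dummy_inputs above uses the usual device-aware seeding idiom: MPS does not support device-local generators, so it falls back to the global torch.manual_seed. In isolation (the make_generator helper is a hypothetical wrapper, not diffusers API):

import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS generators are not device-local, so reuse the global default generator.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


gen = make_generator("cpu", seed=42)
noise = torch.randn((1, 4), generator=gen)  # reproducible across runs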
667
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCamelCase :int = logging.getLogger(__name__) lowerCamelCase :List[Any] = 5_0 # max width of layer names lowerCamelCase :List[Any] = 7_0 # max width of quantizer names def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" ) group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" , action="""store_true""" , help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) , ) def a ( lowerCamelCase__ ): '''simple docstring''' if args.calibrator == "max": A_ : Union[str, Any] = """max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) A_ : int = """histogram""" elif args.calibrator == "mse": A_ : Dict = """histogram""" else: raise ValueError(f'Invalid calibrator {args.calibrator}' ) A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ ) A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ): '''simple docstring''' logger.info("""Configuring Model for Quantization""" ) logger.info(f'using quantization package {pytorch_quantization.__file__}' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ ) if args.quant_disable: set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCamelCase__ , 
args.quant_disable_keyword , _disabled=lowerCamelCase__ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ ) if args.recalibrate_weights: recalibrate_weights(lowerCamelCase__ ) if args.fuse_qkv: fuse_qkv(lowerCamelCase__ , lowerCamelCase__ ) if args.clip_gelu: clip_gelu(lowerCamelCase__ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'{name:80}: {module}' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): for mod in [qq, qk, qv]: if not hasattr(lowerCamelCase__ , """_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return A_ : List[Any] = qq._amax.detach().item() A_ : Optional[int] = qk._amax.detach().item() A_ : Dict = qv._amax.detach().item() A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) qq._amax.fill_(lowerCamelCase__ ) qk._amax.fill_(lowerCamelCase__ ) qv._amax.fill_(lowerCamelCase__ ) logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(f'FUSE_QKV: {name:{name_width}}' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ ) A_ : Dict = mod._input_quantizer._amax.data.detach().item() logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: A_ : Tuple = mod.weight.shape[0] A_ : Dict = mod._weight_quantizer._amax.detach() A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ): if not 
hasattr(mod.weight_quantizer , """_amax""" ): print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach() logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' ) A_ : str = amax def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ): '''simple docstring''' if ignore is None: A_ : int = [] elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = [ignore] A_ : Optional[Any] = 0 for name, mod in model.named_modules(): if not hasattr(lowerCamelCase__ , """weight""" ): continue A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) ) for name, mod in model.named_modules(): A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ ) A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ ) if not hasattr(lowerCamelCase__ , """weight""" ): continue if type(lowerCamelCase__ ) in ignore: continue if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]: continue A_ : Optional[int] = f'Act:{input_q.extra_repr()}' A_ : Dict = f'Wgt:{weight_q.extra_repr()}' A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}' if len(lowerCamelCase__ ) <= line_width: logger.info(lowerCamelCase__ ) else: logger.info(f'{name:{name_width}} {act_str}' ) logger.info(f'{" ":{name_width}} {wgt_str}' ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = 0 for name, mod in model.named_modules(): if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ): print(f'{name:80} {mod}' ) count += 1 print(f'{count} TensorQuantizers found in model' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if quantizer_mod is not None: assert hasattr(lowerCamelCase__ , lowerCamelCase__ ) setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: logger.warning(f'{name} has no {quantizer}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}' for k, v in kwargs.items(): s += f' {k}={v}' if which in ["input", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) if which in ["weight", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): A_ : Dict = f'Warning: 
changing {name:{name_width}}' for k, v in kwargs.items(): s += f' {k}={v}' setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ )
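fuse_qkv above forces the query, key and value quantizers to share one scale by overwriting each quantizer's _amax buffer with the max of the three. Conceptually, with plain tensors standing in for the pytorch_quantization buffers:

import torch

q_amax, k_amax, v_amax = torch.tensor(2.5), torch.tensor(3.1), torch.tensor(1.9)

shared = max(q_amax.item(), k_amax.item(), v_amax.item())
for amax in (q_amax, k_amax, v_amax):
    amax.fill_(shared)  # every quantizer now reports the same range

assert q_amax.item() == k_amax.item() == v_amax.item()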
667
1
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : List[Any] = 0 @slow def _a (self ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) def _a (self ): A_ : str = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def _a (self ): A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) # Check that tokenizer_type ≠ model_type A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) @require_tokenizers def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , 
tokenizer_type="""bert""" ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" ) self.assertIsInstance(lowercase , lowercase ) def _a (self ): with pytest.raises(lowercase ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) if isinstance(lowercase , lowercase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase ) else: self.assertEqual(tokenizer.do_lower_case , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def _a (self ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai A_ : List[str] = TOKENIZER_MAPPING.values() A_ : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(lowercase ) @require_tokenizers def _a (self ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase ) @require_tokenizers def _a (self ): A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase ) A_ : List[Any] = """Hello, world. 
How are you?""" A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase ) A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def _a (self ): A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(lowercase ) , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def _a (self ): A_ : Any = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(lowercase , lowercase ) def _a (self ): # Check we can load the tokenizer config of an online model. A_ : Tuple = get_tokenizer_config("""bert-base-cased""" ) A_ : Any = config.pop("""_commit_hash""" , lowercase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(lowercase , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. A_ : List[Any] = get_tokenizer_config(lowercase ) self.assertDictEqual(lowercase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. A_ : int = AutoTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Dict = get_tokenizer_config(lowercase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) A_ : Tuple = CustomTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) # Can register in two steps AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: A_ : str = BertTokenizerFast.from_pretrained(lowercase ) bert_tokenizer.save_pretrained(lowercase ) A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase ): A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase ): A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : str = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def _a (self ): class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = False class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = NewTokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = False try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # If remote code is not set, the default is to use local A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
A_ : int = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): A_ : Dict = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def _a (self ): with self.assertRaisesRegex( lowercase , """bert-base is not a local folder and is not a valid model identifier""" ): A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" ) def _a (self ): with self.assertRaisesRegex( lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" ) def _a (self ): # Make sure we have cached the tokenizer. A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
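The registration tests above exercise the public extension hooks; stripped to its core, the pattern looks like this (the MyConfig/MyTokenizer stubs are illustrative placeholders, not real model classes):

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # must match the name passed to AutoConfig.register


class MyTokenizer(PreTrainedTokenizer):
    pass


AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# From here on, AutoTokenizer.from_pretrained resolves MyConfig -> MyTokenizer.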
667
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : List[Any] = 0 @slow def _a (self ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) def _a (self ): A_ : str = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def _a (self ): A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) # Check that tokenizer_type ≠ model_type A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) @require_tokenizers def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , 
tokenizer_type="""bert""" ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" ) self.assertIsInstance(lowercase , lowercase ) def _a (self ): with pytest.raises(lowercase ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) if isinstance(lowercase , lowercase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase ) else: self.assertEqual(tokenizer.do_lower_case , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def _a (self ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai A_ : List[str] = TOKENIZER_MAPPING.values() A_ : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(lowercase ) @require_tokenizers def _a (self ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase ) @require_tokenizers def _a (self ): A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase ) A_ : List[Any] = """Hello, world. 
How are you?""" A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase ) A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def _a (self ): A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(lowercase ) , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def _a (self ): A_ : Any = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(lowercase , lowercase ) def _a (self ): # Check we can load the tokenizer config of an online model. A_ : Tuple = get_tokenizer_config("""bert-base-cased""" ) A_ : Any = config.pop("""_commit_hash""" , lowercase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(lowercase , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. A_ : List[Any] = get_tokenizer_config(lowercase ) self.assertDictEqual(lowercase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. A_ : int = AutoTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Dict = get_tokenizer_config(lowercase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) A_ : Tuple = CustomTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) # Can register in two steps AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: A_ : str = BertTokenizerFast.from_pretrained(lowercase ) bert_tokenizer.save_pretrained(lowercase ) A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase ): A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase ): A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : str = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def _a (self ): class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = False class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = NewTokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = False try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # If remote code is not set, the default is to use local A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
A_ : int = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): A_ : Dict = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def _a (self ): with self.assertRaisesRegex( lowercase , """bert-base is not a local folder and is not a valid model identifier""" ): A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" ) def _a (self ): with self.assertRaisesRegex( lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" ) def _a (self ): # Make sure we have cached the tokenizer. A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
667
1
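The two cells above exercise AutoTokenizer's custom-class registration and trust_remote_code paths. As a rough de-obfuscated sketch of the registration pattern being tested — the fixture names below mirror the test's CustomConfig/CustomTokenizer and are otherwise my own, not a verbatim excerpt:

from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomTokenizer(BertTokenizer):
    """Stand-in for the test fixture; any slow tokenizer subclass works."""


# Map the new model type to its config, then the config to its tokenizer class.
AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)

# Re-registering a type that already exists in Transformers raises ValueError,
# which is exactly what the assertRaises blocks in the test check.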
'''simple docstring''' import os def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Tuple = len(grid[0] ) A_ : str = len(lowerCamelCase__ ) A_ : List[str] = 0 A_ : int = 0 A_ : List[str] = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(lowerCamelCase__ ): for j in range(n_rows - 3 ): A_ : Any = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] A_ : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: A_ : List[Any] = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: A_ : Optional[Any] = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) A_ : int = max( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if max_product > largest: A_ : List[str] = max_product return largest def a ( ): '''simple docstring''' A_ : str = [] with open(os.path.dirname(lowerCamelCase__ ) + """/grid.txt""" ) as file: for line in file: grid.append(line.strip("""\n""" ).split(""" """ ) ) A_ : List[Any] = [[int(lowerCamelCase__ ) for i in grid[j]] for j in range(len(lowerCamelCase__ ) )] return largest_product(lowerCamelCase__ ) if __name__ == "__main__": print(solution())
667
'''simple docstring''' from __future__ import annotations def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if partitions <= 0: raise ValueError("""partitions must be a positive number!""" ) if partitions > number_of_bytes: raise ValueError("""partitions can not > number_of_bytes!""" ) A_ : int = number_of_bytes // partitions A_ : Union[str, Any] = [] for i in range(lowerCamelCase__ ): A_ : Dict = i * bytes_per_partition + 1 A_ : Tuple = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
667
1
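The style_context above partitions a byte count into contiguous ranges. A de-obfuscated reading of that helper, with the final slice absorbing the remainder (the names here are mine):

def allocation_list(number_of_bytes: int, partitions: int) -> list:
    # Equal-size slices; the last partition takes whatever is left over.
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation.append(f"{start_bytes}-{end_bytes}")
    return allocation


print(allocation_list(100, 3))  # ['1-33', '34-66', '67-100']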
'''simple docstring''' class _lowerCAmelCase : def __init__(self , lowercase , lowercase , lowercase ): A_ : List[str] = name A_ : Dict = value A_ : Optional[int] = weight def __repr__(self ): return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def _a (self ): return self.value def _a (self ): return self.name def _a (self ): return self.weight def _a (self ): return self.value / self.weight def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = [] for i in range(len(lowerCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ ) A_ : Any = [] A_, A_ : Tuple = 0.0, 0.0 for i in range(len(lowerCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def a ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
667
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Any = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) A_ : Union[str, Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : str = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Union[str, Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = dct.pop(lowerCamelCase__ ) A_ : Optional[int] = val def a ( lowerCamelCase__ ): '''simple docstring''' if "handwritten" in checkpoint_url: A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : 
Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ ) A_ : int = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : List[str] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Union[str, Any] = 10_24 A_ : List[Any] = 40_96 A_ : Dict = 24 A_ : List[str] = 16 A_ : Union[str, Any] = 10_24 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Optional[Any] = False A_ : Union[str, Any] = """relu""" A_ : List[str] = 10_24 A_ : Tuple = True A_ : Tuple = False A_ : List[str] = False # load HuggingFace model A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ ) A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ ) A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() # load state_dict of original model, rename some keys A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""] A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ ) if key.startswith("""decoder""" ) and "output_projection" not in key: A_ : str = val else: A_ : List[str] = val # load state dict model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image A_ : str = ViTImageProcessor(size=encoder_config.image_size ) A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" ) A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values # verify logits A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ) A_ : Dict = outputs.logits A_ : str = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : Any = torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: A_ : List[Any] = torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, 
-0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase :Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
667
1
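The Things class above implements a greedy knapsack: sort by a key (typically value per unit weight) and take items while they still fit under the budget. A readable sketch of the same algorithm, with illustrative data of my own:

from dataclasses import dataclass


@dataclass
class Thing:
    name: str
    value: float
    weight: float

    def value_weight(self) -> float:
        return self.value / self.weight


def greedy(items, max_cost, key):
    # Sort best-first by the chosen key, then take whatever still fits.
    result, total_cost, total_value = [], 0.0, 0.0
    for item in sorted(items, key=key, reverse=True):
        if total_cost + item.weight <= max_cost:
            result.append(item)
            total_cost += item.weight
            total_value += item.value
    return result, total_value


menu = [Thing("burger", 80, 40), Thing("salad", 30, 10), Thing("fries", 50, 20)]
print(greedy(menu, 60, key=Thing.value_weight))  # takes salad + fries, value 80.0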
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase :List[str] = { '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig'''] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[Any] = ['''ConvNextFeatureExtractor'''] lowerCamelCase :List[Any] = ['''ConvNextImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = [ '''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvNextForImageClassification''', '''ConvNextModel''', '''ConvNextPreTrainedModel''', '''ConvNextBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Dict = [ '''TFConvNextForImageClassification''', '''TFConvNextModel''', '''TFConvNextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
667
'''simple docstring''' print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
667
1
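The style_context above is a one-line quine: %r substitutes the template's own repr (quotes included) and %% collapses to a literal %, so the printed text reproduces the source exactly. The same trick, unrolled into two steps:

# Unrolled version of the quine above.
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)
# -> print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))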
'''simple docstring''' import os def a ( ): '''simple docstring''' with open(os.path.dirname(lowerCamelCase__ ) + """/p022_names.txt""" ) as file: A_ : List[str] = str(file.readlines()[0] ) A_ : Dict = names.replace("""\"""" , """""" ).split(""",""" ) names.sort() A_ : List[str] = 0 A_ : str = 0 for i, name in enumerate(lowerCamelCase__ ): for letter in name: name_score += ord(lowerCamelCase__ ) - 64 total_score += (i + 1) * name_score A_ : Tuple = 0 return total_score if __name__ == "__main__": print(solution())
667
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY) def a ( ): '''simple docstring''' A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ ) # assert negative_img array for at least one True assert negative_img.any() def a ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def a ( ): '''simple docstring''' A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a ( ): '''simple docstring''' A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() A_ : List[Any] = canny.canny(lowerCamelCase__ ) # assert canny array for at least one True assert canny_array.any() def a ( ): '''simple docstring''' assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all() def a ( ): '''simple docstring''' A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ ) assert res.any() def a ( ): '''simple docstring''' assert med.median_filter(lowerCamelCase__ , 3 ).any() def a ( ): '''simple docstring''' A_, A_ : int = sob.sobel_filter(lowerCamelCase__ ) assert grad.any() and theta.any() def a ( ): '''simple docstring''' A_ : int = sp.make_sepia(lowerCamelCase__ , 20 ) assert sepia.all() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def a ( ): '''simple docstring''' A_ : int = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 ) # Test for get_neighbors_pixel function() return not None A_ : str = 0 A_ : str = 0 A_ : Dict = image[x_coordinate][y_coordinate] A_ : Optional[Any] = lbp.get_neighbors_pixel( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image A_ : str = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert lbp_image.any()
667
1
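The first cell above is the Project Euler 22 name-score computation: sort the names, score each letter A=1 .. Z=26, and weight each name's score by its 1-based position. De-obfuscated, with a toy name list of my own:

def name_score(name: str) -> int:
    # ord("A") is 65, so subtracting 64 maps A -> 1 ... Z -> 26.
    return sum(ord(letter) - 64 for letter in name)


names = sorted(["MARY", "PATRICIA", "LINDA"])
total_score = sum((i + 1) * name_score(name) for i, name in enumerate(names))
print(total_score)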
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__(self , lowercase , lowercase=13 , lowercase=3 , lowercase=224 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ): A_ : List[Any] = size if size is not None else {"""height""": 18, """width""": 18} A_ : Any = parent A_ : Optional[int] = batch_size A_ : Optional[int] = num_channels A_ : int = image_size A_ : Dict = min_resolution A_ : Dict = max_resolution A_ : str = do_resize A_ : Tuple = size A_ : Dict = do_normalize A_ : str = image_mean A_ : Dict = image_std def _a (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor if is_vision_available() else None def _a (self ): A_ : str = EfficientFormerImageProcessorTester(self ) @property def _a (self ): return self.image_proc_tester.prepare_image_processor_dict() def _a (self ): A_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , """image_mean""" ) ) self.assertTrue(hasattr(lowercase , """image_std""" ) ) self.assertTrue(hasattr(lowercase , """do_normalize""" ) ) self.assertTrue(hasattr(lowercase , """do_resize""" ) ) self.assertTrue(hasattr(lowercase , """size""" ) ) def _a (self ): pass def _a (self ): # Initialize image_processor A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input A_ : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched A_ : Optional[int] = image_processor(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def _a (self ): # Initialize image_processor A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , np.ndarray ) # Test not batched input A_ : List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched A_ : List[Any] = image_processor(lowercase , return_tensors="""pt""" 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def _a (self ): # Initialize image_processor A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input A_ : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched A_ : Any = image_processor(lowercase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
667
'''simple docstring''' from importlib import import_module from .logging import get_logger lowerCamelCase :Dict = get_logger(__name__) class _lowerCAmelCase : def __init__(self , lowercase , lowercase=None ): A_ : Optional[int] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , lowercase , getattr(lowercase , lowercase ) ) A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Dict = [] def __init__(self , lowercase , lowercase , lowercase , lowercase=None ): A_ : Union[str, Any] = obj A_ : Optional[int] = target A_ : Optional[Any] = new A_ : Optional[Any] = target.split(""".""" )[0] A_ : Tuple = {} A_ : Optional[int] = attrs or [] def __enter__(self ): *A_, A_ : Optional[Any] = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase ) ): try: A_ : Any = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): A_ : int = getattr(self.obj , lowercase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows patching renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): A_ : str = obj_attr # patch at top level setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) ) A_ : Optional[Any] = getattr(self.obj , lowercase ) # construct lower-level patches for key in submodules[i + 1 :]: setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) ) A_ : Dict = getattr(lowercase , lowercase ) # finally set the target attribute setattr(lowercase , lowercase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows patching renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , lowercase ) is attr_value: A_ : Dict = getattr(self.obj , lowercase ) setattr(self.obj , lowercase , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" A_ : int = globals()["""__builtins__"""][target_attr] setattr(self.obj , lowercase , self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__(self , *lowercase ): for attr in list(self.original ): setattr(self.obj , lowercase , self.original.pop(lowercase ) ) def _a (self ): self.__enter__() self._active_patches.append(self ) def _a (self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
667
1
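The datasets patcher above goes to some length to handle renamed imports and nested submodules such as os.path.join; the core mechanism is just save / replace / restore around setattr. A minimal sketch of that idea (this is not the library's API):

import contextlib
import os


@contextlib.contextmanager
def patch_attribute(obj, name, new):
    """Temporarily replace obj.name, restoring the original on exit."""
    original = getattr(obj, name)
    setattr(obj, name, new)
    try:
        yield
    finally:
        setattr(obj, name, original)


with patch_attribute(os.path, "join", lambda *parts: "/".join(parts)):
    print(os.path.join("a", "b"))  # "a/b", via the patched join
print(os.path.join("a", "b"))  # original os.path.join restored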
'''simple docstring''' import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging lowerCamelCase :int = logging.get_logger(__name__) class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Any = None @experimental def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return _map_with_joblib(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = num_proc if num_proc <= len(lowerCamelCase__ ) else len(lowerCamelCase__ ) A_ : str = [] # We organize the splits ourselves (contiguous splits) for index in range(lowerCamelCase__ ): A_ : List[str] = len(lowerCamelCase__ ) // num_proc A_ : Optional[Any] = len(lowerCamelCase__ ) % num_proc A_ : Optional[int] = div * index + min(lowerCamelCase__ , lowerCamelCase__ ) A_ : int = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(lowerCamelCase__ ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( f'Error dividing inputs iterable among processes. ' f'Total number of objects {len(lowerCamelCase__ )}, ' f'length: {sum(len(i[1] ) for i in split_kwds )}' ) logger.info( f'Spawning {num_proc} processes for {len(lowerCamelCase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}' ) A_, A_ : Optional[int] = None, None if not disable_tqdm: A_, A_ : Dict = (RLock(),), tqdm.set_lock with Pool(lowerCamelCase__ , initargs=lowerCamelCase__ , initializer=lowerCamelCase__ ) as pool: A_ : Tuple = pool.map(lowerCamelCase__ , lowerCamelCase__ ) logger.info(f'Finished {num_proc} processes' ) A_ : Tuple = [obj for proc_res in mapped for obj in proc_res] logger.info(f'Unpacked {len(lowerCamelCase__ )} objects' ) return mapped def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowerCamelCase__ ): return joblib.Parallel()( joblib.delayed(lowerCamelCase__ )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: A_ : Tuple = None
667
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[Any] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Any = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
1
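The parallel map above hands each worker one contiguous slice of the input, spreading the remainder over the first len(iterable) % num_proc slices. The split arithmetic in isolation, mirroring the div/mod logic of the obfuscated code:

def contiguous_splits(items, num_proc):
    # div items per slice, with the first mod slices taking one extra.
    div, mod = divmod(len(items), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(items[start:end])
    return splits


print(contiguous_splits(list(range(10)), 3))  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]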
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :Optional[Any] = logging.get_logger(__name__) lowerCamelCase :List[Any] = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = 'rwkv' __SCREAMING_SNAKE_CASE : List[Any] = {'max_position_embeddings': 'context_length'} def __init__(self , lowercase=50277 , lowercase=1024 , lowercase=4096 , lowercase=32 , lowercase=None , lowercase=None , lowercase=1E-5 , lowercase=0 , lowercase=0 , lowercase=6 , lowercase=False , lowercase=True , **lowercase , ): A_ : Any = vocab_size A_ : str = context_length A_ : List[str] = hidden_size A_ : Tuple = num_hidden_layers A_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size A_ : List[Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size A_ : Optional[Any] = layer_norm_epsilon A_ : Optional[int] = rescale_every A_ : List[Any] = use_cache A_ : Tuple = bos_token_id A_ : List[str] = eos_token_id super().__init__( tie_word_embeddings=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
667
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ): super().__init__() self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase ) # create an imagenet -> id dictionary for easier use A_ : str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): A_ : Optional[Any] = int(lowercase ) A_ : List[Any] = dict(sorted(self.labels.items() ) ) def _a (self , lowercase ): if not isinstance(lowercase , lowercase ): A_ : Optional[int] = list(lowercase ) for l in label: if l not in self.labels: raise ValueError( F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ): A_ : Tuple = len(lowercase ) A_ : Optional[Any] = self.transformer.config.sample_size A_ : int = self.transformer.config.in_channels A_ : Optional[int] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , ) A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 ) A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device ) A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: A_ : List[Any] = latent_model_input[: len(lowercase ) // 2] A_ : List[str] = torch.cat([half, half] , dim=0 ) A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase ) A_ : Tuple = t if not torch.is_tensor(lowercase ): # TODO: this requires sync between CPU and GPU.
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) A_ : Optional[Any] = latent_model_input.device.type == """mps""" if isinstance(lowercase , lowercase ): A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa else: A_ : List[Any] = torch.intaa if is_mps else torch.intaa A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: A_ : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A_ : int = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output A_ : List[Any] = self.transformer( lowercase , timestep=lowercase , class_labels=lowercase ).sample # perform guidance if guidance_scale > 1: A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 ) A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) A_ : str = torch.cat([half_eps, half_eps] , dim=0 ) A_ : Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: A_, A_ : int = torch.split(lowercase , lowercase , dim=1 ) else: A_ : Optional[int] = noise_pred # compute previous image: x_t -> x_t-1 A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample if guidance_scale > 1: A_, A_ : int = latent_model_input.chunk(2 , dim=0 ) else: A_ : Union[str, Any] = latent_model_input A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents A_ : List[Any] = self.vae.decode(lowercase ).sample A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ : int = self.numpy_to_pil(lowercase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowercase )
667
1
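The DiT pipeline above batches conditional and null class labels together and applies classifier-free guidance by splitting the model output and recombining the halves. That step in isolation — the tensor names are mine, and the learned-sigma channel split is omitted:

import torch


def apply_guidance(noise_pred, guidance_scale):
    # Batch layout matches the pipeline above: conditional half first,
    # unconditional (null-label) half second.
    cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)


print(apply_guidance(torch.randn(4, 3, 8, 8), 4.0).shape)  # torch.Size([4, 3, 8, 8])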
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ): super().__init__() self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase ) # create an imagenet -> id dictionary for easier use A_ : str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): A_ : Optional[Any] = int(lowercase ) A_ : List[Any] = dict(sorted(self.labels.items() ) ) def _a (self , lowercase ): if not isinstance(lowercase , lowercase ): A_ : Optional[int] = list(lowercase ) for l in label: if l not in self.labels: raise ValueError( F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ): A_ : Tuple = len(lowercase ) A_ : Optional[Any] = self.transformer.config.sample_size A_ : int = self.transformer.config.in_channels A_ : Optional[int] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , ) A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 ) A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device ) A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: A_ : List[Any] = latent_model_input[: len(lowercase ) // 2] A_ : List[str] = torch.cat([half, half] , dim=0 ) A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase ) A_ : Tuple = t if not torch.is_tensor(lowercase ): # TODO: this requires sync between CPU and GPU.
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) A_ : Optional[Any] = latent_model_input.device.type == """mps""" if isinstance(lowercase , lowercase ): A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa else: A_ : List[Any] = torch.intaa if is_mps else torch.intaa A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: A_ : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A_ : int = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output A_ : List[Any] = self.transformer( lowercase , timestep=lowercase , class_labels=lowercase ).sample # perform guidance if guidance_scale > 1: A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 ) A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) A_ : str = torch.cat([half_eps, half_eps] , dim=0 ) A_ : Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: A_, A_ : int = torch.split(lowercase , lowercase , dim=1 ) else: A_ : Optional[int] = noise_pred # compute previous image: x_t -> x_t-1 A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample if guidance_scale > 1: A_, A_ : int = latent_model_input.chunk(2 , dim=0 ) else: A_ : Union[str, Any] = latent_model_input A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents A_ : List[Any] = self.vae.decode(lowercase ).sample A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ : int = self.numpy_to_pil(lowercase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowercase )
667
'''simple docstring''' import math lowerCamelCase :int = 1_0 lowerCamelCase :List[Any] = 7 lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS def a ( lowerCamelCase__ = 20 ): '''simple docstring''' A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ ) A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total) return f'{result:.9f}' if __name__ == "__main__": print(solution(2_0))
667
1
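The second snippet in the row above is Project Euler 493: the expected number of distinct colours when drawing 20 balls from 70 (7 colours, 10 balls each). A de-obfuscated sketch of the same computation, with the constant and function names restored by assumption:

import math

NUM_COLOURS = 7
BALLS_PER_COLOUR = 10
NUM_BALLS = NUM_COLOURS * BALLS_PER_COLOUR  # 70

def expected_distinct_colours(drawn: int = 20) -> float:
    # P(a given colour is entirely absent) = C(60, drawn) / C(70, drawn);
    # linearity of expectation then gives 7 * (1 - that probability).
    total = math.comb(NUM_BALLS, drawn)
    missing = math.comb(NUM_BALLS - BALLS_PER_COLOUR, drawn)
    return NUM_COLOURS * (1 - missing / total)

print(f"{expected_distinct_colours():.9f}")  # 6.818741802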
'''simple docstring''' def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
667
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :List[Any] = logging.get_logger(__name__) lowerCamelCase :Union[str, Any] = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model' __SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ): A_ : Tuple = vocab_size A_ : str = hidden_size A_ : Optional[Any] = d_kv A_ : Tuple = d_ff A_ : str = num_layers A_ : int = num_heads A_ : Dict = relative_attention_num_buckets A_ : Optional[Any] = relative_attention_max_distance A_ : Dict = dropout_rate A_ : Optional[int] = layer_norm_epsilon A_ : Dict = initializer_factor A_ : Any = use_cache A_ : int = eos_token_id A_ : Tuple = decoder_start_token_id # for backwards compatibility A_ : str = dense_act_fn super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , ) @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model' def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ): super().__init__(**lowercase ) A_ : List[str] = hidden_size A_ : Optional[int] = patch_embed_hidden_size A_ : Any = d_ff A_ : str = dropout_rate A_ : Dict = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : List[Any] = initializer_range A_ : List[str] = initializer_factor A_ : Dict = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[Any] = dense_act_fn A_ : List[Any] = seq_len A_ : Tuple = relative_attention_num_buckets A_ : Any = relative_attention_max_distance A_ : int = d_kv @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : Tuple = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = 'pix2struct' __SCREAMING_SNAKE_CASE : List[Any] = True def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ): super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase ) if text_config is None: A_ : Optional[Any] = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) A_ : Tuple = PixaStructTextConfig(**lowercase ) A_ : List[str] = PixaStructVisionConfig(**lowercase ) A_ : Dict = self.text_config.decoder_start_token_id A_ : Union[str, Any] = self.text_config.pad_token_id A_ : str = self.text_config.eos_token_id A_ : List[str] = initializer_factor A_ : int = initializer_range A_ : Tuple = self.initializer_range A_ : Tuple = self.initializer_range A_ : List[str] = is_vqa @classmethod def _a (cls , lowercase , lowercase , **lowercase ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase ) def _a (self ): A_ : Optional[Any] = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : List[Any] = self.vision_config.to_dict() A_ : List[str] = self.__class__.model_type return output
667
1
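The one-liner in the row above tests whether two integers have opposite signs via XOR. A small sketch with explicit parentheses (Python gives `^` higher precedence than `<`, so the original parses the same way):

def different_signs(a: int, b: int) -> bool:
    # In two's complement the XOR's sign bit is set exactly when the
    # operands' sign bits differ; Python ints behave the same for negatives.
    return (a ^ b) < 0

assert different_signs(-3, 7)
assert not different_signs(4, 9)
assert not different_signs(-4, -9)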
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :int = logging.get_logger(__name__) lowerCamelCase :Optional[Any] = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = 'open-llama' def __init__(self , lowercase=100000 , lowercase=4096 , lowercase=11008 , lowercase=32 , lowercase=32 , lowercase="silu" , lowercase=2048 , lowercase=0.02 , lowercase=1E-6 , lowercase=True , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=True , lowercase=None , **lowercase , ): A_ : Any = vocab_size A_ : Dict = max_position_embeddings A_ : Dict = hidden_size A_ : Dict = intermediate_size A_ : List[Any] = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Optional[Any] = hidden_act A_ : Union[str, Any] = initializer_range A_ : Optional[int] = rms_norm_eps A_ : Optional[int] = use_cache A_ : str = kwargs.pop( """use_memorry_efficient_attention""" , lowercase ) A_ : int = hidden_dropout_prob A_ : int = attention_dropout_prob A_ : int = use_stable_embedding A_ : Union[str, Any] = shared_input_output_embedding A_ : Optional[Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase , ) def _a (self ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowercase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """ F'got {self.rope_scaling}' ) A_ : Tuple = self.rope_scaling.get("""type""" , lowercase ) A_ : Union[str, Any] = self.rope_scaling.get("""factor""" , lowercase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(lowercase , lowercase ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
667
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available lowerCamelCase :Union[str, Any] = { '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :int = [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Tuple = ['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
1
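The Open-Llama config in the row above validates its `rope_scaling` dict. A standalone sketch of that validation logic; the function and variable names are mine, the checks mirror the snippet:

def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with two fields, `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently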
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : int = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 
27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) A_ : List[str] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above A_ : str = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above A_ : Union[str, Any] = tf_top_k_top_p_filtering(lowercase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) A_ : int = output[output != -float("""inf""" )] A_ : Optional[Any] = tf.cast( tf.where(tf.not_equal(lowercase , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(lowercase , lowercase , rtol=1E-12 ) tf.debugging.assert_equal(lowercase , lowercase ) @require_tf class _lowerCAmelCase ( unittest.TestCase , __UpperCAmelCase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): __SCREAMING_SNAKE_CASE : Any = { 'AutoModelForCausalLM': TFAutoModelForCausalLM, 'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq, 'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM, 'AutoModelForVision2Seq': TFAutoModelForVisionaSeq, 'LogitsProcessorList': TFLogitsProcessorList, 'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor, 'create_tensor_fn': tf.convert_to_tensor, 'floats_tensor': floats_tensor, 'return_tensors': 'tf', } @slow def _a (self ): # TF-only test: tf.saved_model export A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) A_ : str = 2 A_ : Dict = 2 class _lowerCAmelCase ( tf.Module ): def __init__(self , lowercase ): super(lowercase , self ).__init__() A_ : Any = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=lowercase , ) def _a (self , lowercase , lowercase ): A_ : List[str] = self.model.generate( input_ids=lowercase , attention_mask=lowercase , max_new_tokens=lowercase , return_dict_in_generate=lowercase , ) return {"sequences": outputs["sequences"]} A_ : Optional[int] = [[2, 0], [102, 103]] A_ : Optional[int] = [[1, 0], [1, 1]] A_ : Union[str, Any] = DummyModel(model=lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(lowercase , lowercase , signatures={"""serving_default""": dummy_model.serving} ) A_ : Optional[int] = tf.saved_model.load(lowercase ).signatures["""serving_default"""] for batch_size in range(1 , len(lowercase ) + 1 ): A_ : Optional[int] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } A_ : Dict = serving_func(**lowercase )["""sequences"""] A_ : List[str] = test_model.generate(**lowercase , max_new_tokens=lowercase ) tf.debugging.assert_equal(lowercase , lowercase ) @slow def _a (self ): # TF-only test: tf.saved_model export A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) A_ : Any = 1 A_ : Optional[Any] = 2 class _lowerCAmelCase ( tf.Module ): def __init__(self , lowercase ): super(lowercase , self ).__init__() A_ : List[Any] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, 
None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=lowercase , ) def _a (self , lowercase , lowercase ): A_ : Any = self.model.generate( input_ids=lowercase , attention_mask=lowercase , max_new_tokens=lowercase , return_dict_in_generate=lowercase , ) return {"sequences": outputs["sequences"]} A_ : Tuple = [[2], [102, 103]] A_ : Any = [[1], [1, 1]] A_ : Union[str, Any] = DummyModel(model=lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(lowercase , lowercase , signatures={"""serving_default""": dummy_model.serving} ) A_ : List[str] = tf.saved_model.load(lowercase ).signatures["""serving_default"""] for input_row in range(len(lowercase ) ): A_ : Any = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } A_ : Union[str, Any] = serving_func(**lowercase )["""sequences"""] A_ : str = test_model.generate(**lowercase , max_new_tokens=lowercase ) tf.debugging.assert_equal(lowercase , lowercase ) @slow @require_tensorflow_text def _a (self ): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=lowercase ) class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self ): super().__init__() A_ : List[str] = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(lowercase , """spiece.model""" ) , """rb""" ).read() ) A_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def _a (self , lowercase , *lowercase , **lowercase ): A_ : Union[str, Any] = self.tokenizer.tokenize(lowercase ) A_, A_ : int = text.pad_model_inputs( lowercase , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) A_ : Dict = self.model.generate(input_ids=lowercase , attention_mask=lowercase ) return self.tokenizer.detokenize(lowercase ) A_ : Any = CompleteSentenceTransformer() A_ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) A_ : Union[str, Any] = complete_model(lowercase ) A_ : Tuple = tf.keras.Model(lowercase , lowercase ) keras_model.save(lowercase ) def _a (self ): # Has PT equivalent: this test relies on random sampling A_ : str = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } A_ : Any = 14 A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) A_ : str = """Hello, my dog is cute and""" A_ : List[str] = tokenizer(lowercase , return_tensors="""tf""" ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) A_ : List[Any] = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) A_ : Optional[int] = model.generate(**lowercase , eos_token_id=lowercase , **lowercase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) A_ : int = [638, 198] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) A_ : Dict = model.generate(**lowercase , eos_token_id=lowercase , **lowercase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _a (self ): # Has PT equivalent: ample use of framework-specific code A_ : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) A_ : List[str] = """Hugging Face is a technology company based in New York and Paris.""" A_ : Dict = bart_tokenizer(lowercase , 
return_tensors="""tf""" ).input_ids A_ : str = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) A_ : Union[str, Any] = bart_model.generate(lowercase ).numpy() class _lowerCAmelCase ( __UpperCAmelCase ): def _a (self , lowercase , lowercase=None , **lowercase ): return super().call(lowercase , **lowercase ) A_ : Tuple = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) A_ : Optional[int] = bart_model.generate(lowercase , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(lowercase , lowercase ) ) class _lowerCAmelCase ( bart_model.model.encoder.__class__ ): def _a (self , lowercase , **lowercase ): return super().call(lowercase , **lowercase ) A_ : Dict = FakeEncoder(bart_model.config , bart_model.model.shared ) A_ : Optional[int] = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) A_ : str = bart_model.generate(lowercase ).numpy() with self.assertRaises(lowercase ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(lowercase , foo="""bar""" )
667
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer'] __SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor' __SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__(self , lowercase=None , lowercase=None , **lowercase ): A_ : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase , ) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase , lowercase ) def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) A_ : Dict = features["""words"""] A_ : Optional[int] = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) # add pixel values A_ : List[Any] = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] ) A_ : Optional[int] = images return encoded_inputs def _a (self , lowercase , lowercase ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image A_ : str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase ) != 
len(lowercase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" F' {len(lowercase )} and {len(lowercase )}' ) return images_with_overflow def _a (self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property def _a (self ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _a (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , ) return self.image_processor_class @property def _a (self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , ) return self.image_processor
667
1
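The test row above exercises tf_top_k_top_p_filtering. Below is a NumPy sketch of the filtering idea, as a simplified variant only (single batch row, no min_tokens_to_keep; this is not the library implementation):

import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    out = logits.astype(float)
    # top-k: mask everything below the k-th largest logit
    kth = np.sort(out)[-top_k]
    out[out < kth] = -np.inf
    # top-p: in descending-probability order, drop the tail once the
    # cumulative mass exceeds p (the token that crosses the threshold is kept)
    order = np.argsort(out)[::-1]
    probs = np.exp(out[order] - out[order][0])
    probs /= probs.sum()
    remove = np.cumsum(probs) > top_p
    remove[1:] = remove[:-1].copy()
    remove[0] = False
    out[order[remove]] = -np.inf
    return out

print(top_k_top_p_filter(np.array([1.0, 3.0, 2.0, 0.5]), top_k=3, top_p=0.9))  # [-inf  3.  2. -inf]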
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def a ( lowerCamelCase__=None ): '''simple docstring''' A_ : Tuple = argparse.ArgumentParser(add_help=lowerCamelCase__ , allow_abbrev=lowerCamelCase__ ) # The main config parser A_ : Dict = config_command_parser(lowerCamelCase__ ) # The subparser to add commands to A_ : Any = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" ) # Then add other parsers with the parent parser default_command_parser(lowerCamelCase__ , parents=[parent_parser] ) update_command_parser(lowerCamelCase__ , parents=[parent_parser] ) return config_parser def a ( ): '''simple docstring''' A_ : List[Any] = get_config_parser() A_ : List[Any] = config_parser.parse_args() if not hasattr(lowerCamelCase__ , """func""" ): config_parser.print_help() exit(1 ) # Run args.func(lowerCamelCase__ ) if __name__ == "__main__": main()
667
'''simple docstring''' from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCamelCase :Optional[int] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , **lowercase ): super().__init__(**lowercase ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) self.check_model_type(lowercase ) def _a (self , **lowercase ): A_ : str = {} A_ : Dict = {} A_ : str = {} # preprocess args if "points_per_batch" in kwargs: A_ : Dict = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: A_ : int = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: A_ : str = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: A_ : int = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: A_ : Any = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: A_ : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: A_ : Union[str, Any] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: A_ : List[str] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: A_ : List[Any] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ): return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase ) def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ): A_ : Tuple = load_image(lowercase ) A_ : int = self.image_processor.size["""longest_edge"""] A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": A_ : Optional[Any] = self.get_inference_context() with inference_context(): A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device ) A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) A_ : Tuple = image_embeddings A_ : Dict = grid_points.shape[1] A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0 , lowercase , lowercase ): A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :] A_ : List[Any] = input_labels[:, i : i + points_per_batch] A_ : Optional[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ): A_ : Any = model_inputs.pop("""input_boxes""" ) A_ : str = model_inputs.pop("""is_last""" ) A_ : int = model_inputs.pop("""original_sizes""" ).tolist() A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist() A_ : List[str] = self.model(**lowercase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks A_ : Optional[int] = model_outputs["""pred_masks"""] A_ : Tuple = self.image_processor.post_process_masks( lowercase , lowercase , lowercase , lowercase , binarize=lowercase ) A_ : Union[str, Any] = model_outputs["""iou_scores"""] A_, A_, A_ : Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ): A_ : Tuple = [] A_ : Optional[Any] = [] A_ : str = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) A_ : Any = torch.cat(lowercase ) A_ : List[Any] = torch.cat(lowercase ) A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation( lowercase , lowercase , lowercase , lowercase ) A_ : int = defaultdict(lowercase ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowercase ) A_ : Optional[int] = {} if output_rle_mask: A_ : List[str] = rle_mask if output_bboxes_mask: A_ : Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
667
1
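The first snippet in the row above wires argparse subcommands and dispatches on an attached `func` attribute. A toy sketch of the same pattern (the command names and handlers here are hypothetical):

import argparse

def make_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="config", allow_abbrev=False)
    sub = parser.add_subparsers(title="subcommands", dest="subcommand")
    default = sub.add_parser("default", help="write a default config file")
    default.set_defaults(func=lambda args: print("writing default config"))
    update = sub.add_parser("update", help="update an existing config file")
    update.set_defaults(func=lambda args: print("updating config"))
    return parser

args = make_parser().parse_args(["default"])
if hasattr(args, "func"):
    args.func(args)  # prints: writing default config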
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=False , lowercase=True , lowercase="None" , lowercase=3 , lowercase=4 , lowercase=None , ): A_ : Optional[int] = parent A_ : int = batch_size A_ : Union[str, Any] = seq_length A_ : str = is_training A_ : Dict = use_input_mask A_ : Dict = use_token_type_ids A_ : Dict = use_labels A_ : List[str] = vocab_size A_ : int = hidden_size A_ : Tuple = num_hidden_layers A_ : int = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : Any = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : int = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Optional[int] = type_sequence_label_size A_ : str = initializer_range A_ : Union[str, Any] = num_labels A_ : Union[str, Any] = num_choices A_ : Dict = relative_attention A_ : Tuple = position_biased_input A_ : str = pos_att_type A_ : Union[str, Any] = scope def _a (self ): A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Optional[int] = None if self.use_input_mask: A_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) A_ : List[str] = None if self.use_token_type_ids: A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Optional[Any] = None A_ : Union[str, Any] = None A_ : Dict = None if self.use_labels: A_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a (self ): return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _a (self ): A_ : List[Any] = self.get_config() A_ : Any = 300 return config def _a (self , lowercase ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _a (self , 
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : Optional[Any] = DebertaModel(config=lowercase ) model.to(lowercase ) model.eval() A_ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )[0] A_ : int = model(lowercase , token_type_ids=lowercase )[0] A_ : str = model(lowercase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : Any = DebertaForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : Union[str, Any] = self.num_labels A_ : Optional[int] = DebertaForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() A_ : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : Optional[Any] = self.num_labels A_ : List[str] = DebertaForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() A_ : Tuple = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): A_ : Union[str, Any] = DebertaForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() A_ : str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a (self ): A_ : Dict = self.prepare_config_and_inputs() ( ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ( A_ ), ) : Dict = config_and_inputs A_ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : int = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( { 'feature-extraction': DebertaModel, 'fill-mask': DebertaForMaskedLM, 'question-answering': DebertaForQuestionAnswering, 'text-classification': DebertaForSequenceClassification, 'token-classification': DebertaForTokenClassification, 'zero-shot': DebertaForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : List[str] = False def _a (self ): A_ : Optional[Any] = DebertaModelTester(self ) A_ : int = ConfigTester(self , config_class=lowercase , 
hidden_size=37 ) def _a (self ): self.config_tester.run_common_tests() def _a (self ): A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowercase ) def _a (self ): A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase ) def _a (self ): A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase ) def _a (self ): A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowercase ) def _a (self ): A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowercase ) @slow def _a (self ): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = DebertaModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @unittest.skip(reason="""Model not available yet""" ) def _a (self ): pass @slow def _a (self ): A_ : Tuple = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) A_ : int = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) A_ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A_ : Union[str, Any] = model(lowercase , attention_mask=lowercase )[0] # compare the actual values for a slice. A_ : int = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
667
'''simple docstring''' from collections.abc import Callable import numpy as np def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) ) A_ : int = np.zeros((n + 1,) ) A_ : List[str] = ya A_ : Any = xa for k in range(lowerCamelCase__ ): A_ : List[Any] = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] ) A_ : Optional[int] = y[k] + ( (step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
667
1
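The second snippet in the row above is Heun's method: an Euler predictor followed by a trapezoidal corrector. A de-obfuscated sketch with the parameter names restored by assumption:

import numpy as np

def heun(ode_func, y0: float, x0: float, x_end: float, step: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        pred = y[k] + step * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (step / 2) * (ode_func(x, y[k]) + ode_func(x + step, pred))
        x += step
    return y

# dy/dx = y, y(0) = 1  ->  y(1) should approach e ~ 2.71828
print(heun(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)[-1])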
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ): '''simple docstring''' if config_name_or_path is None: A_ : int = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: A_ : List[Any] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: A_ : Union[str, Any] = question_encoder_name_or_path A_ : int = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. A_ : Optional[int] = RagConfig.from_pretrained(lowerCamelCase__ ) A_ : str = AutoConfig.from_pretrained(lowerCamelCase__ ) A_ : Dict = AutoConfig.from_pretrained(lowerCamelCase__ ) A_ : Dict = gen_config A_ : str = question_encoder_config A_ : Any = model_class.from_pretrained_question_encoder_generator( lowerCamelCase__ , lowerCamelCase__ , config=lowerCamelCase__ ) rag_model.save_pretrained(lowerCamelCase__ ) # Sanity check. model_class.from_pretrained(lowerCamelCase__ ) # Save tokenizers. A_ : Any = AutoTokenizer.from_pretrained(lowerCamelCase__ ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) A_ : str = AutoTokenizer.from_pretrained(lowerCamelCase__ ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": lowerCamelCase :Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) lowerCamelCase :Dict = parser.parse_args() lowerCamelCase :Union[str, Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
667
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ ) if matches: A_ : Optional[Any] = float(matches[1] ) A_ : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". A_ : Optional[Any] = 10_01 A_ : Union[str, Any] = """imagenet-1k-id2label.json""" A_ : List[str] = """huggingface/label-files""" A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()} A_ : int = """background""" A_ : List[str] = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def a ( ): '''simple docstring''' A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ ) # Load 🤗 model A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor A_ : Any = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" ) A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: A_ : Any = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) A_ : Union[str, Any] = """google/""" + model_name image_processor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should be in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase :str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
667
1
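The conversion script in the row above shifts the ImageNet label map by one because the TensorFlow MobileNetV1 head predicts 1001 classes, with index 0 reserved for "background". A toy sketch of that remapping (the three labels below are stand-ins for the real imagenet-1k-id2label.json):

imagenet_id2label = {"0": "tench", "1": "goldfish", "2": "great white shark"}  # toy stand-in
id2label = {int(k) + 1: v for k, v in imagenet_id2label.items()}
id2label[0] = "background"
label2id = {v: k for k, v in id2label.items()}
print(id2label)  # {1: 'tench', 2: 'goldfish', 3: 'great white shark', 0: 'background'}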
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase :Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ): super().__init__(**lowercase ) A_ : Dict = size if size is not None else {"""shortest_edge""": 224} A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" ) A_ : str = do_resize A_ : str = size A_ : List[str] = resample A_ : Any = do_center_crop A_ : Union[str, Any] = crop_size A_ : List[Any] = do_rescale A_ : List[Any] = rescale_factor A_ : Dict = do_normalize A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Union[str, Any] = do_convert_rgb def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : List[str] = do_resize if do_resize is not None else self.do_resize A_ : int = size if size is not None else self.size A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase ) A_ : int = resample if resample is not None else self.resample A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase ) A_ : str = do_rescale if do_rescale is not None else self.do_rescale A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : Any = image_mean if image_mean is not None else self.image_mean A_ : Any = image_std if image_std is not None else self.image_std A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : List[str] = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : int = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A_ : int = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase , tensor_type=lowercase )
667
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer' __SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer'] __SCREAMING_SNAKE_CASE : Tuple = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__(self , lowercase , lowercase=None ): super().__init__(lowercase ) A_ : Any = speaker_embeddings @classmethod def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: A_ : Any = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) A_ : str = None else: with open(lowercase ) as speaker_embeddings_json: A_ : List[str] = json.load(lowercase ) else: A_ : str = None A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase ) A_ : Optional[int] = {} A_ : Tuple = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": A_ : Union[str, Any] = self._load_voice_preset(lowercase ) A_ : Tuple = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , ) A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' ) A_ : str = tmp_dict with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def _a (self , lowercase = None , **lowercase ): A_ : List[Any] = self.speaker_embeddings[voice_preset] A_ : Optional[Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) A_ : int = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) A_ : Tuple = np.load(lowercase ) return voice_preset_dict def _a (self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): A_ : Optional[int] = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ): A_ : Optional[int] = voice_preset + """.npz""" A_ : Any = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase ) A_ : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: A_ : Union[str, Any] = voice_preset return encoded_text
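A minimal sketch of the preset contract that _validate_voice_preset_dict above enforces; only the key names and the expected ranks (1 for semantic, 2 for coarse/fine) come from the code, the concrete array sizes are assumptions:

import numpy as np

preset_shape = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}
voice_preset = {
    "semantic_prompt": np.zeros(256),     # rank 1; the length is illustrative
    "coarse_prompt": np.zeros((2, 128)),  # rank 2; the shape is illustrative
    "fine_prompt": np.zeros((8, 128)),    # rank 2; the shape is illustrative
}
for key, ndim in preset_shape.items():
    assert isinstance(voice_preset[key], np.ndarray) and voice_preset[key].ndim == ndim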
667
1
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ): super().__init__() if safety_checker is None: logger.warning( F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure' """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=lowercase , speech_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , feature_extractor=lowercase , ) def _a (self , lowercase = "auto" ): if slice_size == "auto": A_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase ) def _a (self ): self.enable_attention_slicing(lowercase ) @torch.no_grad() def __call__(self , lowercase , lowercase=16000 , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ): A_ : List[str] = self.speech_processor.feature_extractor( lowercase , return_tensors="""pt""" , sampling_rate=lowercase ).input_features.to(self.device ) A_ : str = self.speech_model.generate(lowercase , max_length=480000 ) A_ : Optional[int] = self.speech_processor.tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase , normalize=lowercase )[ 0 ] if isinstance(lowercase , lowercase ): A_ : str = 1 elif isinstance(lowercase , lowercase ): A_ : Optional[int] = len(lowercase ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowercase )}' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(lowercase )}.' 
) # get prompt text embeddings A_ : int = self.tokenizer( lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) A_ : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A_ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) A_ : List[str] = text_input_ids[:, : self.tokenizer.model_max_length] A_ : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A_, A_, A_ : str = text_embeddings.shape A_ : Optional[int] = text_embeddings.repeat(1 , lowercase , 1 ) A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A_ : Tuple = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A_ : List[str] if negative_prompt is None: A_ : Dict = [""""""] * batch_size elif type(lowercase ) is not type(lowercase ): raise TypeError( F'`negative_prompt` should be the same type to `prompt`, but got {type(lowercase )} !=' F' {type(lowercase )}.' ) elif isinstance(lowercase , lowercase ): A_ : Any = [negative_prompt] elif batch_size != len(lowercase ): raise ValueError( F'`negative_prompt`: {negative_prompt} has batch size {len(lowercase )}, but `prompt`:' F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches' """ the batch size of `prompt`.""" ) else: A_ : Optional[int] = negative_prompt A_ : Dict = text_input_ids.shape[-1] A_ : List[Any] = self.tokenizer( lowercase , padding="""max_length""" , max_length=lowercase , truncation=lowercase , return_tensors="""pt""" , ) A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A_ : Dict = uncond_embeddings.shape[1] A_ : Dict = uncond_embeddings.repeat(1 , lowercase , 1 ) A_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A_ : Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A_ : Tuple = torch.randn(lowercase , generator=lowercase , device="""cpu""" , dtype=lowercase ).to( self.device ) else: A_ : Optional[Any] = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) A_ : Optional[Any] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(lowercase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A_ : Optional[Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A_ : str = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ : Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ : Optional[Any] = {} if accepts_eta: A_ : Any = eta for i, t in enumerate(self.progress_bar(lowercase ) ): # expand the latents if we are doing classifier free guidance A_ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ : List[Any] = self.scheduler.scale_model_input(lowercase , lowercase ) # predict the noise residual A_ : List[Any] = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample # perform guidance if do_classifier_free_guidance: A_, A_ : List[str] = noise_pred.chunk(2 ) A_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A_ : List[str] = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase , lowercase , lowercase ) A_ : Tuple = 1 / 0.1_82_15 * latents A_ : List[str] = self.vae.decode(lowercase ).sample A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ : Tuple = self.numpy_to_pil(lowercase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase )
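The classifier-free guidance step inside the denoising loop above combines the two stacked noise predictions; a toy-tensor sketch of just that arithmetic:

import torch

noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked along the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5                  # the pipeline default above
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 8, 8])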
667
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Union[str, Any] = tempfile.mkdtemp() A_ : List[Any] = BlipImageProcessor() A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase ) processor.save_pretrained(self.tmpdirname ) def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer def _a (self ): shutil.rmtree(self.tmpdirname ) def _a (self ): A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a (self ): A_ : str = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 ) A_ : str = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase ) self.assertIsInstance(processor.qformer_tokenizer , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : List[str] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = self.prepare_image_inputs() A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" ) A_ : Dict = processor(images=lowercase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a (self ): A_ : List[Any] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : Any = self.get_qformer_tokenizer() A_ : List[str] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : str = """lower newer""" A_ : List[Any] = processor(text=lowercase ) A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase ) A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , 
encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def _a (self ): A_ : int = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Optional[int] = """lower newer""" A_ : Optional[int] = self.prepare_image_inputs() A_ : Tuple = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(lowercase ): processor() def _a (self ): A_ : Dict = self.get_image_processor() A_ : str = self.get_tokenizer() A_ : Optional[int] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : Optional[int] = processor.batch_decode(lowercase ) A_ : Dict = tokenizer.batch_decode(lowercase ) self.assertListEqual(lowercase , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Dict = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = """lower newer""" A_ : Optional[Any] = self.prepare_image_inputs() A_ : Any = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
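The assertions above pin the processor output to a fixed key set, with the qformer_ prefix marking the Q-Former tokenizer's tensors; a sketch of that contract as a reusable check:

EXPECTED_KEYS = {
    "input_ids",
    "attention_mask",
    "qformer_input_ids",
    "qformer_attention_mask",
    "pixel_values",
}

def check_processor_keys(batch_keys):
    missing = EXPECTED_KEYS - set(batch_keys)
    assert not missing, f"processor output lost keys: {missing}"

check_processor_keys(EXPECTED_KEYS)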
667
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Optional[int] = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } A_ : Any = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 128, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 142, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(lowercase ) , lowercase ) def _a (self ): A_ : Optional[int] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(lowercase ) , x.transpose() ) ) A_ : List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def _a (self ): A_ : Optional[int] = np.random.randn(3 , 4 ) A_ : str = torch.tensor(lowercase ) self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) ) A_ : Tuple = np.random.randn(3 , 4 , 5 ) A_ : int = torch.tensor(lowercase ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def _a (self ): A_ : str = np.random.randn(3 , 4 ) A_ : int = tf.constant(lowercase ) self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) ) A_ : Any = np.random.randn(3 , 4 , 5 ) A_ : List[str] = tf.constant(lowercase ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def _a (self ): A_ : Optional[int] = np.random.randn(3 , 4 ) A_ : Optional[Any] = jnp.array(lowercase ) self.assertTrue(np.allclose(transpose(lowercase ) , np.asarray(transpose(lowercase ) ) ) ) A_ : str = np.random.randn(3 , 4 , 5 ) A_ : str = jnp.array(lowercase ) self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase , axes=(1, 2, 0) ) ) ) ) def _a (self ): A_ : str = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.reshape(lowercase , (4, 3) ) ) ) A_ : int = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , np.reshape(lowercase , (12, 5) ) ) ) @require_torch def _a (self ): A_ : Optional[Any] = np.random.randn(3 , 4 ) A_ : List[Any] = torch.tensor(lowercase ) 
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) ) A_ : Optional[int] = np.random.randn(3 , 4 , 5 ) A_ : int = torch.tensor(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , reshape(lowercase , (12, 5) ).numpy() ) ) @require_tf def _a (self ): A_ : Optional[Any] = np.random.randn(3 , 4 ) A_ : List[Any] = tf.constant(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) ) A_ : Optional[int] = np.random.randn(3 , 4 , 5 ) A_ : int = tf.constant(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , reshape(lowercase , (12, 5) ).numpy() ) ) @require_flax def _a (self ): A_ : Dict = np.random.randn(3 , 4 ) A_ : str = jnp.array(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.asarray(reshape(lowercase , (4, 3) ) ) ) ) A_ : Optional[int] = np.random.randn(3 , 4 , 5 ) A_ : List[Any] = jnp.array(lowercase ) self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , np.asarray(reshape(lowercase , (12, 5) ) ) ) ) def _a (self ): A_ : List[str] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(lowercase ) , np.squeeze(lowercase ) ) ) A_ : List[str] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.squeeze(lowercase , axis=2 ) ) ) @require_torch def _a (self ): A_ : List[str] = np.random.randn(1 , 3 , 4 ) A_ : Any = torch.tensor(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) ) A_ : str = np.random.randn(1 , 4 , 1 , 5 ) A_ : Optional[int] = torch.tensor(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) ) @require_tf def _a (self ): A_ : Tuple = np.random.randn(1 , 3 , 4 ) A_ : List[Any] = tf.constant(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) ) A_ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 ) A_ : Dict = tf.constant(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) ) @require_flax def _a (self ): A_ : Tuple = np.random.randn(1 , 3 , 4 ) A_ : Dict = jnp.array(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase ) , np.asarray(squeeze(lowercase ) ) ) ) A_ : str = np.random.randn(1 , 4 , 1 , 5 ) A_ : str = jnp.array(lowercase ) self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.asarray(squeeze(lowercase , axis=2 ) ) ) ) def _a (self ): A_ : List[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.expand_dims(lowercase , axis=1 ) ) ) @require_torch def _a (self ): A_ : Tuple = np.random.randn(3 , 4 ) A_ : str = torch.tensor(lowercase ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) ) @require_tf def _a (self ): A_ : List[Any] = np.random.randn(3 , 4 ) A_ : List[Any] = tf.constant(lowercase ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) ) @require_flax def _a (self ): A_ : Any = np.random.randn(3 , 4 ) A_ : Any = jnp.array(lowercase ) self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.asarray(expand_dims(lowercase , axis=1 ) ) ) )
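The utilities exercised above expose one call signature across numpy, torch, tf and jax; a numpy-only sketch of the invariants the tests keep checking:

import numpy as np

x = np.random.randn(1, 4, 1, 5)
assert np.squeeze(x).shape == (4, 5)             # squeeze drops every size-1 axis
assert np.squeeze(x, axis=2).shape == (1, 4, 5)  # or only the requested one
assert np.expand_dims(np.random.randn(3, 4), axis=1).shape == (3, 1, 4)
assert np.reshape(np.random.randn(3, 4, 5), (12, 5)).shape == (12, 5)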
667
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :Optional[Any] = logging.get_logger(__name__) lowerCamelCase :Tuple = { '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''', } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str' def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ): super().__init__(**lowercase ) A_ : int = image_size A_ : List[str] = patch_size A_ : Tuple = num_channels A_ : List[str] = max_token_length A_ : int = num_character_labels A_ : str = num_bpe_labels A_ : Tuple = num_wordpiece_labels A_ : Optional[int] = hidden_size A_ : List[Any] = num_hidden_layers A_ : int = num_attention_heads A_ : Tuple = mlp_ratio A_ : str = distilled A_ : Union[str, Any] = layer_norm_eps A_ : str = drop_rate A_ : int = qkv_bias A_ : Dict = attn_drop_rate A_ : List[Any] = drop_path_rate A_ : Any = output_aa_attentions A_ : Union[str, Any] = initializer_range
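A quick sanity check on the defaults above, assuming the usual ViT-style patch embedding (an assumption, not stated in the config itself): a 32x128 input at patch size 4 gives an 8x32 grid of 256 tokens, into which the 27 read-out positions of max_token_length fit comfortably.

image_size, patch_size, max_token_length = [32, 128], 4, 27
grid = (image_size[0] // patch_size, image_size[1] // patch_size)
num_patches = grid[0] * grid[1]
print(grid, num_patches)  # (8, 32) 256
assert max_token_length < num_patches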
667
1
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase :Any = logging.get_logger(__name__) class _lowerCAmelCase ( enum.Enum ): __SCREAMING_SNAKE_CASE : Optional[int] = 0 __SCREAMING_SNAKE_CASE : List[str] = 1 @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Optional[int] = 'generated' def __init__(self , *lowercase , **lowercase ): super().__init__(*lowercase , **lowercase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def _a (self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , **lowercase , ): A_ : Dict = {} if truncation is not None: A_ : Optional[int] = truncation A_ : Tuple = generate_kwargs A_ : List[str] = {} if return_tensors is not None and return_type is None: A_ : Tuple = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: A_ : List[str] = return_type if clean_up_tokenization_spaces is not None: A_ : List[str] = clean_up_tokenization_spaces if stop_sequence is not None: A_ : Union[str, Any] = self.tokenizer.encode(lowercase , add_special_tokens=lowercase ) if len(lowercase ) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""" ) A_ : Dict = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _a (self , lowercase , lowercase , lowercase ): return True def _a (self , *lowercase , lowercase ): A_ : str = self.model.config.prefix if self.model.config.prefix is not None else """""" if isinstance(args[0] , lowercase ): if self.tokenizer.pad_token_id is None: raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" ) A_ : List[str] = ([prefix + arg for arg in args[0]],) A_ : int = True elif isinstance(args[0] , lowercase ): A_ : Tuple = (prefix + args[0],) A_ : Any = False else: raise ValueError( F' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`' ) A_ : Any = self.tokenizer(*lowercase , padding=lowercase , truncation=lowercase , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__(self , *lowercase , **lowercase ): A_ : Any = super().__call__(*lowercase , **lowercase ) if ( isinstance(args[0] , lowercase ) and all(isinstance(lowercase , lowercase ) for el in args[0] ) and all(len(lowercase ) == 1 for res in result ) ): return [res[0] for res in result] return result def _a (self , lowercase , lowercase=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase ): A_ : Tuple = self._parse_and_tokenize(lowercase , truncation=lowercase , **lowercase ) return inputs def _a (self , lowercase , **lowercase ): if self.framework == "pt": A_, A_ : Union[str, Any] = model_inputs["""input_ids"""].shape elif self.framework == "tf": A_, A_ : List[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy() A_ : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length ) A_ : List[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length ) self.check_inputs(lowercase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] ) A_ : Optional[Any] = self.model.generate(**lowercase , **lowercase ) A_ : str = output_ids.shape[0] if self.framework == "pt": A_ : Dict = output_ids.reshape(lowercase , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": A_ : Optional[int] = tf.reshape(lowercase , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def _a (self , lowercase , lowercase=ReturnType.TEXT , lowercase=False ): A_ : int = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: A_ : Tuple = {F'{self.return_name}_token_ids': output_ids} elif return_type == ReturnType.TEXT: A_ : List[Any] = { F'{self.return_name}_text': self.tokenizer.decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase , ) } records.append(lowercase ) return records @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[Any] = 'summary' def __call__(self , *lowercase , **lowercase ): return super().__call__(*lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase ): if max_length < min_length: logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' ) if input_length < max_length: logger.warning( F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ' """a summarization task, where outputs shorter than the input are typically wanted, you might """ F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' ) @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = 'translation' def _a (self , lowercase , lowercase , lowercase ): if input_length > 0.9 * max_length: logger.warning( F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ' """increasing your max_length manually, e.g. 
translator('...', max_length=400)""" ) return True def _a (self , *lowercase , lowercase=TruncationStrategy.DO_NOT_TRUNCATE , lowercase=None , lowercase=None ): if getattr(self.tokenizer , """_build_translation_inputs""" , lowercase ): return self.tokenizer._build_translation_inputs( *lowercase , return_tensors=self.framework , truncation=lowercase , src_lang=lowercase , tgt_lang=lowercase ) else: return super()._parse_and_tokenize(*lowercase , truncation=lowercase ) def _a (self , lowercase=None , lowercase=None , **lowercase ): A_, A_, A_ : Dict = super()._sanitize_parameters(**lowercase ) if src_lang is not None: A_ : Any = src_lang if tgt_lang is not None: A_ : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. A_ : List[str] = kwargs.get("""task""" , self.task ) A_ : str = task.split("""_""" ) if task and len(lowercase ) == 4: # translation, XX, to YY A_ : Union[str, Any] = items[1] A_ : Any = items[3] return preprocess_params, forward_params, postprocess_params def __call__(self , *lowercase , **lowercase ): return super().__call__(*lowercase , **lowercase )
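A sketch of how _sanitize_parameters above recovers languages from the task name: a string of the form translation_XX_to_YY splits into four underscore-separated items, with the source language at index 1 and the target at index 3:

task = "translation_en_to_fr"  # illustrative task name
items = task.split("_")
if task and len(items) == 4:
    src_lang, tgt_lang = items[1], items[3]
    print(src_lang, tgt_lang)  # en fr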
667
'''simple docstring'''
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` by the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        # secant update: step from x_n1 along the secant line through the last two points
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        # stop once successive iterates agree to within the tolerance
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
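The routine applies to any continuous callable given two starting points; for instance, sqrt(2) as the positive root of x^2 - 2:

print(intersection(lambda x: x * x - 2, 1.0, 2.0))  # ~1.4142135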
667
1
'''simple docstring''' import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _lowerCAmelCase ( unittest.TestCase ): @require_torch def _a (self ): A_ : str = pipeline( task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" ) A_ : Union[str, Any] = load_dataset("""ashraq/esc50""" ) A_ : Optional[int] = dataset["""train"""]["""audio"""][-1]["""array"""] A_ : Dict = audio_classifier(lowercase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(lowercase ) , [{"""score""": 0.5_01, """label""": """Sound of a dog"""}, {"""score""": 0.4_99, """label""": """Sound of vaccum cleaner"""}] , ) @unittest.skip("""No models are available in TF""" ) def _a (self ): pass @slow @require_torch def _a (self ): A_ : int = pipeline( task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , ) # This is an audio of a dog A_ : Dict = load_dataset("""ashraq/esc50""" ) A_ : str = dataset["""train"""]["""audio"""][-1]["""array"""] A_ : Any = audio_classifier(lowercase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(lowercase ) , [ {"""score""": 0.9_99, """label""": """Sound of a dog"""}, {"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""}, ] , ) A_ : Optional[int] = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] ) self.assertEqual( nested_simplify(lowercase ) , [ [ {"""score""": 0.9_99, """label""": """Sound of a dog"""}, {"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""}, ], ] * 5 , ) A_ : Optional[int] = audio_classifier( [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 ) self.assertEqual( nested_simplify(lowercase ) , [ [ {"""score""": 0.9_99, """label""": """Sound of a dog"""}, {"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""}, ], ] * 5 , ) @unittest.skip("""No models are available in TF""" ) def _a (self ): pass
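Note that each asserted score pair above (0.501/0.499 and 0.999/0.001) sums to one: the pipeline distributes probability mass over the candidate labels, which is what nested_simplify then rounds. A minimal check of that property:

for scores in ([0.501, 0.499], [0.999, 0.001]):
    assert abs(sum(scores) - 1.0) < 1e-9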
667
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase :Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ): super().__init__(**lowercase ) A_ : Dict = size if size is not None else {"""shortest_edge""": 224} A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" ) A_ : str = do_resize A_ : str = size A_ : List[str] = resample A_ : Any = do_center_crop A_ : Union[str, Any] = crop_size A_ : List[Any] = do_rescale A_ : List[Any] = rescale_factor A_ : Dict = do_normalize A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Union[str, Any] = do_convert_rgb def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : List[str] = do_resize if do_resize is not None else self.do_resize A_ : int = size if size is not None else self.size A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase ) A_ : int = resample if resample is not None else self.resample A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase ) A_ : str = do_rescale if do_rescale is not None else self.do_rescale A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : Any = image_mean if image_mean is not None else self.image_mean A_ : Any = image_std if image_std is not None else self.image_std A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : List[str] = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : int = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A_ : int = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase , tensor_type=lowercase )
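This row is the CLIP-style image processor; assuming it corresponds to the public CLIPImageProcessor class in transformers (an assumption about the de-obfuscated original), a minimal end-to-end call might look like:

import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults mirror the __init__ above
image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)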
667
1
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def a ( lowerCamelCase__ ): '''simple docstring''' return getitem, k def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return setitem, k, v def a ( lowerCamelCase__ ): '''simple docstring''' return delitem, k def a ( lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ): '''simple docstring''' try: return fun(lowerCamelCase__ , *lowerCamelCase__ ), None except Exception as e: return None, e lowerCamelCase :int = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) lowerCamelCase :Tuple = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] lowerCamelCase :Optional[Any] = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] lowerCamelCase :Any = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] lowerCamelCase :Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] lowerCamelCase :List[str] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = HashMap(initial_block_size=4 ) A_ : Optional[Any] = {} for _, (fun, *args) in enumerate(lowerCamelCase__ ): A_, A_ : List[Any] = _run_operation(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) A_, A_ : Dict = _run_operation(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) assert my_res == py_res assert str(lowerCamelCase__ ) == str(lowerCamelCase__ ) assert set(lowerCamelCase__ ) == set(lowerCamelCase__ ) assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) assert set(my.items() ) == set(py.items() ) def a ( ): '''simple docstring''' def is_public(lowerCamelCase__ ) -> bool: return not name.startswith("""_""" ) A_ : Optional[int] = {name for name in dir({} ) if is_public(lowerCamelCase__ )} A_ : Optional[int] = {name for name in dir(HashMap() ) if is_public(lowerCamelCase__ )} assert dict_public_names > hash_public_names
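The parametrized test above is a differential test: replay one operation list against both HashMap and dict and demand identical observable behaviour. The core of that replay loop, sketched against dict alone:

ops = [("set", "key_a", "val_a"), ("get", "key_a"), ("del", "key_a")]
d = {}
for op, *args in ops:
    if op == "set":
        d[args[0]] = args[1]
    elif op == "get":
        d.get(args[0])
    elif op == "del":
        d.pop(args[0], None)
assert d == {}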
667
'''simple docstring''' class _lowerCAmelCase : def __init__(self , lowercase , lowercase , lowercase ): A_ : List[str] = name A_ : Dict = value A_ : Optional[int] = weight def __repr__(self ): return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def _a (self ): return self.value def _a (self ): return self.name def _a (self ): return self.weight def _a (self ): return self.value / self.weight def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = [] for i in range(len(lowerCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ ) A_ : Any = [] A_, A_ : Tuple = 0.0, 0.0 for i in range(len(lowerCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def a ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
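A sketch of the greedy selection this file implements, with the helper names inlined because the row obfuscates them all to a: sort by a caller-supplied key (here the value/weight ratio), then take whole items while they still fit under max_cost:

names, values, weights = ["a", "b", "c"], [60, 100, 120], [10, 20, 30]
max_cost = 50
order = sorted(range(len(names)), key=lambda i: values[i] / weights[i], reverse=True)
picked, total_cost, total_value = [], 0.0, 0.0
for i in order:
    if total_cost + weights[i] <= max_cost:
        picked.append(names[i])
        total_cost += weights[i]
        total_value += values[i]
print(picked, total_value)  # ['a', 'b'] 160.0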
667
1
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder lowerCamelCase :Any = '''__DUMMY_TRANSFORMERS_USER__''' lowerCamelCase :Optional[Any] = '''Dummy User''' lowerCamelCase :List[str] = '''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''' lowerCamelCase :Union[str, Any] = '''https://hub-ci.huggingface.co''' lowerCamelCase :Optional[Any] = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' lowerCamelCase :Dict = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' lowerCamelCase :Tuple = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def a ( lowerCamelCase__ ): '''simple docstring''' monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCamelCase__ ) @pytest.fixture def a ( lowerCamelCase__ ): '''simple docstring''' monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCamelCase__ ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCamelCase__ ) @pytest.fixture def a ( lowerCamelCase__ ): '''simple docstring''' monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCamelCase__ ) @pytest.fixture def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' HfFolder.save_token(lowerCamelCase__ ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def a ( ): '''simple docstring''' return HfApi(endpoint=lowerCamelCase__ ) @pytest.fixture(scope="""session""" ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = HfFolder.get_token() HfFolder.save_token(lowerCamelCase__ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(lowerCamelCase__ ) @pytest.fixture def a ( lowerCamelCase__ ): '''simple docstring''' def _cleanup_repo(lowerCamelCase__ ): hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def a ( lowerCamelCase__ ): '''simple docstring''' @contextmanager def _temporary_repo(lowerCamelCase__ ): try: yield repo_id finally: cleanup_repo(lowerCamelCase__ ) return _temporary_repo @pytest.fixture(scope="""session""" ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = f'repo_txt_data-{int(time.time() * 10E3 )}' A_ : int = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ ) hf_api.upload_file( token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data/text_data.txt""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = f'repo_zipped_txt_data-{int(time.time() * 10E3 )}' A_ : Optional[int] = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ ) hf_api.upload_file( token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , 
path_in_repo="""data.zip""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[Any] = f'repo_zipped_img_data-{int(time.time() * 10E3 )}' A_ : List[str] = f'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ ) hf_api.upload_file( token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
667
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCamelCase :int = logging.getLogger(__name__) lowerCamelCase :List[Any] = 5_0 # max width of layer names lowerCamelCase :List[Any] = 7_0 # max width of quantizer names def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" ) group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" , action="""store_true""" , help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) , ) def a ( lowerCamelCase__ ): '''simple docstring''' if args.calibrator == "max": A_ : Union[str, Any] = """max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) A_ : int = """histogram""" elif args.calibrator == "mse": A_ : Dict = """histogram""" else: raise ValueError(f'Invalid calibrator {args.calibrator}' ) A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ ) A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ): '''simple docstring''' logger.info("""Configuring Model for Quantization""" ) logger.info(f'using quantization package {pytorch_quantization.__file__}' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ ) if args.quant_disable: set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCamelCase__ , 
args.quant_disable_keyword , _disabled=lowerCamelCase__ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ ) if args.recalibrate_weights: recalibrate_weights(lowerCamelCase__ ) if args.fuse_qkv: fuse_qkv(lowerCamelCase__ , lowerCamelCase__ ) if args.clip_gelu: clip_gelu(lowerCamelCase__ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'{name:80}: {module}' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): for mod in [qq, qk, qv]: if not hasattr(lowerCamelCase__ , """_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return A_ : List[Any] = qq._amax.detach().item() A_ : Optional[int] = qk._amax.detach().item() A_ : Dict = qv._amax.detach().item() A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) qq._amax.fill_(lowerCamelCase__ ) qk._amax.fill_(lowerCamelCase__ ) qv._amax.fill_(lowerCamelCase__ ) logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(f'FUSE_QKV: {name:{name_width}}' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ ) A_ : Dict = mod._input_quantizer._amax.data.detach().item() logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: A_ : Tuple = mod.weight.shape[0] A_ : Dict = mod._weight_quantizer._amax.detach() A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ): if not 
hasattr(mod.weight_quantizer , """_amax""" ): print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach() logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' ) A_ : str = amax def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ): '''simple docstring''' if ignore is None: A_ : int = [] elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = [ignore] A_ : Optional[Any] = 0 for name, mod in model.named_modules(): if not hasattr(lowerCamelCase__ , """weight""" ): continue A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) ) for name, mod in model.named_modules(): A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ ) A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ ) if not hasattr(lowerCamelCase__ , """weight""" ): continue if type(lowerCamelCase__ ) in ignore: continue if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]: continue A_ : Optional[int] = f'Act:{input_q.extra_repr()}' A_ : Dict = f'Wgt:{weight_q.extra_repr()}' A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}' if len(lowerCamelCase__ ) <= line_width: logger.info(lowerCamelCase__ ) else: logger.info(f'{name:{name_width}} {act_str}' ) logger.info(f'{" ":{name_width}} {wgt_str}' ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = 0 for name, mod in model.named_modules(): if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ): print(f'{name:80} {mod}' ) count += 1 print(f'{count} TensorQuantizers found in model' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if quantizer_mod is not None: assert hasattr(lowerCamelCase__ , lowerCamelCase__ ) setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: logger.warning(f'{name} has no {quantizer}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}' for k, v in kwargs.items(): s += f' {k}={v}' if which in ["input", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) if which in ["weight", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): A_ : Dict = f'Warning: 
changing {name:{name_width}}' for k, v in kwargs.items(): s += f' {k}={v}' setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ )
667
1
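For context, the quantization helpers above appear to mirror the utilities in transformers' QDQBERT example (quant_trainer.py); the dump obfuscates the function names, so the names below (enable_calibration, finish_calibration) are the upstream ones and should be treated as assumptions. A minimal calibration sketch:

import torch

# `model`, `args` and `calib_loader` are assumed to come from the caller.
enable_calibration(model)                # put every TensorQuantizer into calibration mode
with torch.no_grad():
    for batch in calib_loader:           # a few representative batches are enough
        model(**batch)
finish_calibration(model, args)          # load the collected amax values, re-enable quantization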
'''simple docstring'''


def bfs(graph, source, sink, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
667
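A smaller network makes the result easy to check by hand: the only source-to-sink routes are 0 -> 1 -> 3 (bottleneck 3) and 0 -> 2 -> 3 (bottleneck 2), so the maximum flow is 5. Note that ford_fulkerson mutates its capacity matrix in place.

small = [
    [0, 3, 4, 0],
    [0, 0, 0, 3],
    [0, 0, 0, 2],
    [0, 0, 0, 0],
]
assert ford_fulkerson(small, 0, 3) == 5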
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : List[Any] = 0 @slow def _a (self ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) def _a (self ): A_ : str = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def _a (self ): A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) # Check that tokenizer_type ≠ model_type A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) @require_tokenizers def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , 
tokenizer_type="""bert""" ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" ) self.assertIsInstance(lowercase , lowercase ) def _a (self ): with pytest.raises(lowercase ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) if isinstance(lowercase , lowercase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase ) else: self.assertEqual(tokenizer.do_lower_case , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def _a (self ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai A_ : List[str] = TOKENIZER_MAPPING.values() A_ : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(lowercase ) @require_tokenizers def _a (self ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase ) @require_tokenizers def _a (self ): A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase ) A_ : List[Any] = """Hello, world. 
How are you?""" A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase ) A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def _a (self ): A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(lowercase ) , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def _a (self ): A_ : Any = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(lowercase , lowercase ) def _a (self ): # Check we can load the tokenizer config of an online model. A_ : Tuple = get_tokenizer_config("""bert-base-cased""" ) A_ : Any = config.pop("""_commit_hash""" , lowercase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(lowercase , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. A_ : List[Any] = get_tokenizer_config(lowercase ) self.assertDictEqual(lowercase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. A_ : int = AutoTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Dict = get_tokenizer_config(lowercase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) A_ : Tuple = CustomTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) # Can register in two steps AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: A_ : str = BertTokenizerFast.from_pretrained(lowercase ) bert_tokenizer.save_pretrained(lowercase ) A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase ): A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase ): A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : str = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def _a (self ): class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = False class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = NewTokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = False try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # If remote code is not set, the default is to use local A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
A_ : int = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): A_ : Dict = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def _a (self ): with self.assertRaisesRegex( lowercase , """bert-base is not a local folder and is not a valid model identifier""" ): A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" ) def _a (self ): with self.assertRaisesRegex( lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" ) def _a (self ): # Make sure we have cached the tokenizer. A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
667
1
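The behaviors these tests pin down reduce to a few AutoTokenizer calls; a minimal sketch, where the local vocab directory is a hypothetical path:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-cased")                   # fast tokenizer by default
slow = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False)  # explicit slow class
# tokenizer_type lets you load from a bare vocab directory without a config:
local = AutoTokenizer.from_pretrained("./my_vocab_dir", tokenizer_type="bert")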
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Dict = logging.get_logger(__name__) lowerCamelCase :Dict = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] lowerCamelCase :Tuple = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def a ( lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = torch.load(lowerCamelCase__ , map_location="""cpu""" ) return sd def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=rename_keys_prefix ): '''simple docstring''' A_ : Any = OrderedDict() A_ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A_ : Optional[Any] = key for name_pair in rename_keys_prefix: A_ : Dict = new_key.replace(name_pair[0] , name_pair[1] ) A_ : Any = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A_ : int = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' assert ( checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS ), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.' # Get Config if "pre" in checkpoint_path: A_ : Tuple = """pretraining""" if "vcr" in checkpoint_path: A_ : Optional[int] = {"""visual_embedding_dim""": 5_12} elif "vqa_advanced" in checkpoint_path: A_ : Any = {"""visual_embedding_dim""": 20_48} elif "vqa" in checkpoint_path: A_ : List[str] = {"""visual_embedding_dim""": 20_48} elif "nlvr" in checkpoint_path: A_ : List[Any] = {"""visual_embedding_dim""": 10_24} else: raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' 
) else: if "vcr" in checkpoint_path: A_ : Dict = {"""visual_embedding_dim""": 5_12} A_ : Any = """multichoice""" elif "vqa_advanced" in checkpoint_path: A_ : Tuple = {"""visual_embedding_dim""": 20_48} A_ : int = """vqa_advanced""" elif "vqa" in checkpoint_path: A_ : List[str] = {"""visual_embedding_dim""": 20_48, """num_labels""": 31_29} A_ : Optional[Any] = """vqa""" elif "nlvr" in checkpoint_path: A_ : str = { """visual_embedding_dim""": 10_24, """num_labels""": 2, } A_ : Tuple = """nlvr""" A_ : Union[str, Any] = VisualBertConfig(**lowerCamelCase__ ) # Load State Dict A_ : Any = load_state_dict(lowerCamelCase__ ) A_ : List[str] = get_new_dict(lowerCamelCase__ , lowerCamelCase__ ) if model_type == "pretraining": A_ : Union[str, Any] = VisualBertForPreTraining(lowerCamelCase__ ) elif model_type == "vqa": A_ : Union[str, Any] = VisualBertForQuestionAnswering(lowerCamelCase__ ) elif model_type == "nlvr": A_ : int = VisualBertForVisualReasoning(lowerCamelCase__ ) elif model_type == "multichoice": A_ : Dict = VisualBertForMultipleChoice(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) # Save Checkpoints Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') lowerCamelCase :Dict = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
667
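The converter can also be driven directly from Python instead of argparse; the paths below are hypothetical, but the file name must be one of ACCEPTABLE_CHECKPOINTS for the assert at the top of the function to pass:

convert_visual_bert_checkpoint(
    "checkpoints/nlvr2_fine_tuned.th",   # original .th checkpoint (hypothetical path)
    "out/visualbert-nlvr2",              # output folder for the converted model
)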
'''simple docstring'''

from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
667
1
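A worked example of the partitioning above: 100 bytes over 4 partitions gives 25 bytes each, and the final range absorbs any remainder when the division is not exact.

assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]   # remainder folded into the last range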
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu lowerCamelCase :Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: lowerCamelCase :Dict = json.load(f) @require_torch class _lowerCAmelCase ( unittest.TestCase ): def _a (self , lowercase ): return FSMTTokenizer.from_pretrained(lowercase ) def _a (self , lowercase ): A_ : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _a (self , lowercase , lowercase ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality A_ : Union[str, Any] = F'facebook/wmt19-{pair}' A_ : List[Any] = self.get_tokenizer(lowercase ) A_ : int = self.get_model(lowercase ) A_ : List[Any] = bleu_data[pair]["""src"""] A_ : Any = bleu_data[pair]["""tgt"""] A_ : Optional[Any] = tokenizer(lowercase , return_tensors="""pt""" , truncation=lowercase , padding="""longest""" ).to(lowercase ) A_ : Dict = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A_ : List[Any] = tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) A_ : str = calculate_bleu(lowercase , lowercase ) print(lowercase ) self.assertGreaterEqual(scores["""bleu"""] , lowercase )
667
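Stripped of the test harness and the local calculate_bleu helper, the evaluation loop above is just generate-then-decode; a standalone sketch for one pair, with an illustrative input sentence:

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tok = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru")
batch = tok(["Machine translation is useful."], return_tensors="pt")
out = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tok.batch_decode(out, skip_special_tokens=True))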
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Any = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) A_ : Union[str, Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : str = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Union[str, Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = dct.pop(lowerCamelCase__ ) A_ : Optional[int] = val def a ( lowerCamelCase__ ): '''simple docstring''' if "handwritten" in checkpoint_url: A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : 
Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ ) A_ : int = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : List[str] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Union[str, Any] = 10_24 A_ : List[Any] = 40_96 A_ : Dict = 24 A_ : List[str] = 16 A_ : Union[str, Any] = 10_24 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Optional[Any] = False A_ : Union[str, Any] = """relu""" A_ : List[str] = 10_24 A_ : Tuple = True A_ : Tuple = False A_ : List[str] = False # load HuggingFace model A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ ) A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ ) A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() # load state_dict of original model, rename some keys A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""] A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ ) if key.startswith("""decoder""" ) and "output_projection" not in key: A_ : str = val else: A_ : List[str] = val # load state dict model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image A_ : str = ViTImageProcessor(size=encoder_config.image_size ) A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" ) A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values # verify logits A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ) A_ : Dict = outputs.logits A_ : str = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : Any = torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: A_ : List[Any] = torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, 
-0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase :Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
667
1
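Once converted (or using the published Hub counterpart of the checkpoints above), inference follows the usual processor/generate pattern; "line.png" is a hypothetical input image:

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
pixel_values = processor(images=Image.open("line.png").convert("RGB"), return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])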
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :List[Any] = logging.get_logger(__name__) lowerCamelCase :Union[str, Any] = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model' __SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ): A_ : Tuple = vocab_size A_ : str = hidden_size A_ : Optional[Any] = d_kv A_ : Tuple = d_ff A_ : str = num_layers A_ : int = num_heads A_ : Dict = relative_attention_num_buckets A_ : Optional[Any] = relative_attention_max_distance A_ : Dict = dropout_rate A_ : Optional[int] = layer_norm_epsilon A_ : Dict = initializer_factor A_ : Any = use_cache A_ : int = eos_token_id A_ : Tuple = decoder_start_token_id # for backwards compatibility A_ : str = dense_act_fn super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , ) @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model' def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ): super().__init__(**lowercase ) A_ : List[str] = hidden_size A_ : Optional[int] = patch_embed_hidden_size A_ : Any = d_ff A_ : str = dropout_rate A_ : Dict = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : List[Any] = initializer_range A_ : List[str] = initializer_factor A_ : Dict = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[Any] = dense_act_fn A_ : List[Any] = seq_len A_ : Tuple = relative_attention_num_buckets A_ : Any = relative_attention_max_distance A_ : int = d_kv @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : Tuple = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = 'pix2struct' __SCREAMING_SNAKE_CASE : List[Any] = True def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ): super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase ) if text_config is None: A_ : Optional[Any] = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) A_ : Tuple = PixaStructTextConfig(**lowercase ) A_ : List[str] = PixaStructVisionConfig(**lowercase ) A_ : Dict = self.text_config.decoder_start_token_id A_ : Union[str, Any] = self.text_config.pad_token_id A_ : str = self.text_config.eos_token_id A_ : List[str] = initializer_factor A_ : int = initializer_range A_ : Tuple = self.initializer_range A_ : Tuple = self.initializer_range A_ : List[str] = is_vqa @classmethod def _a (cls , lowercase , lowercase , **lowercase ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase ) def _a (self ): A_ : Optional[Any] = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : List[Any] = self.vision_config.to_dict() A_ : List[str] = self.__class__.model_type return output
667
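The three classes above appear to correspond to transformers' Pix2StructTextConfig, Pix2StructVisionConfig and Pix2StructConfig (the dump obfuscates the class names, so treat that mapping as an inference). A minimal composition sketch using the upstream names:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=4)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.text_config.num_layers, cfg.vision_config.num_hidden_layers)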
'''simple docstring''' print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
667
1
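The one-liner above is a quine: %r splices the repr of the format string into itself, and %% collapses to a literal %, so the printed output reproduces the source exactly. The same trick written in two steps:

s = "print((lambda quine: quine %% quine)(%r))"
print(s % s)   # prints the original one-liner verbatim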
'''simple docstring''' import tensorflow as tf from ...tf_utils import shape_list class _lowerCAmelCase ( tf.keras.layers.Layer ): def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase=1 , lowercase=False , **lowercase ): super().__init__(**lowercase ) A_ : Any = vocab_size A_ : List[Any] = d_embed A_ : Optional[int] = d_proj A_ : List[Any] = cutoffs + [vocab_size] A_ : Optional[Any] = [0] + self.cutoffs A_ : List[Any] = div_val A_ : str = self.cutoffs[0] A_ : List[Any] = len(self.cutoffs ) - 1 A_ : Any = self.shortlist_size + self.n_clusters A_ : Tuple = keep_order A_ : int = [] A_ : Optional[int] = [] def _a (self , lowercase ): if self.n_clusters > 0: A_ : List[str] = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=lowercase , name="""cluster_weight""" ) A_ : int = self.add_weight( shape=(self.n_clusters,) , initializer="""zeros""" , trainable=lowercase , name="""cluster_bias""" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: A_ : List[Any] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=lowercase , name=F'out_projs_._{i}' , ) self.out_projs.append(lowercase ) else: self.out_projs.append(lowercase ) A_ : Any = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=lowercase , name=F'out_layers_._{i}_._weight' , ) A_ : List[Any] = self.add_weight( shape=(self.vocab_size,) , initializer="""zeros""" , trainable=lowercase , name=F'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): A_, A_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] A_ : Dict = self.d_embed // (self.div_val**i) A_ : Any = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=lowercase , name=F'out_projs_._{i}' ) self.out_projs.append(lowercase ) A_ : Tuple = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=lowercase , name=F'out_layers_._{i}_._weight' , ) A_ : Union[str, Any] = self.add_weight( shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=lowercase , name=F'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) super().build(lowercase ) @staticmethod def _a (lowercase , lowercase , lowercase , lowercase=None ): A_ : Optional[Any] = x if proj is not None: A_ : str = tf.einsum("""ibd,ed->ibe""" , lowercase , lowercase ) return tf.einsum("""ibd,nd->ibn""" , lowercase , lowercase ) + b @staticmethod def _a (lowercase , lowercase ): A_ : Optional[Any] = shape_list(lowercase ) A_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype ) A_ : List[Any] = tf.stack([r, target] , 1 ) return tf.gather_nd(lowercase , lowercase ) def _a (self , lowercase , lowercase , lowercase=True , lowercase=False ): A_ : Optional[Any] = 0 if self.n_clusters == 0: A_ : Optional[Any] = self._logit(lowercase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: A_ : Optional[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowercase , logits=lowercase ) A_ : Dict = tf.nn.log_softmax(lowercase , axis=-1 ) else: A_ : Optional[int] = shape_list(lowercase ) A_ : int = [] A_ : List[Any] = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): A_, A_ : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: A_ : int = (target >= l_idx) & (target < r_idx) A_ : Tuple = tf.where(lowercase ) A_ : Optional[int] = 
tf.boolean_mask(lowercase , lowercase ) - l_idx if self.div_val == 1: A_ : Dict = self.out_layers[0][0][l_idx:r_idx] A_ : List[Any] = self.out_layers[0][1][l_idx:r_idx] else: A_ : str = self.out_layers[i][0] A_ : str = self.out_layers[i][1] if i == 0: A_ : List[str] = tf.concat([cur_W, self.cluster_weight] , 0 ) A_ : Union[str, Any] = tf.concat([cur_b, self.cluster_bias] , 0 ) A_ : List[str] = self._logit(lowercase , lowercase , lowercase , self.out_projs[0] ) A_ : Tuple = tf.nn.log_softmax(lowercase ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: A_ : Optional[Any] = tf.boolean_mask(lowercase , lowercase ) A_ : int = self._gather_logprob(lowercase , lowercase ) else: A_ : Union[str, Any] = self._logit(lowercase , lowercase , lowercase , self.out_projs[i] ) A_ : List[Any] = tf.nn.log_softmax(lowercase ) A_ : str = self.cutoffs[0] + i - 1 # No probability for the head cluster A_ : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(lowercase ) if target is not None: A_ : Union[str, Any] = tf.boolean_mask(lowercase , lowercase ) A_ : List[Any] = tf.boolean_mask(lowercase , lowercase ) A_ : Union[str, Any] = self._gather_logprob(lowercase , lowercase ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(lowercase , -cur_logprob , shape_list(lowercase ) ) A_ : Any = tf.concat(lowercase , axis=-1 ) if target is not None: if return_mean: A_ : Optional[int] = tf.reduce_mean(lowercase ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(lowercase ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(lowercase , name=self.name , aggregation="""mean""" if return_mean else """""" ) return out
667
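This layer appears to match transformers' TFAdaptiveSoftmaxMask, the adaptive softmax used by Transfo-XL; that class name is an inference here. The constructor order below follows the __init__ body above, and the shapes are purely illustrative:

import tensorflow as tf

layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
hidden = tf.random.normal((8, 4, 64))                              # (seq_len, batch, d_proj)
target = tf.random.uniform((8, 4), maxval=1000, dtype=tf.int32)    # token ids
log_probs = layer(hidden, target, return_mean=True, training=True)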
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY) def a ( ): '''simple docstring''' A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ ) # assert negative_img array for at least one True assert negative_img.any() def a ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def a ( ): '''simple docstring''' A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a ( ): '''simple docstring''' A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() A_ : List[Any] = canny.canny(lowerCamelCase__ ) # assert canny array for at least one True assert canny_array.any() def a ( ): '''simple docstring''' assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all() def a ( ): '''simple docstring''' A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ ) assert res.any() def a ( ): '''simple docstring''' assert med.median_filter(lowerCamelCase__ , 3 ).any() def a ( ): '''simple docstring''' A_, A_ : int = sob.sobel_filter(lowerCamelCase__ ) assert grad.any() and theta.any() def a ( ): '''simple docstring''' A_ : int = sp.make_sepia(lowerCamelCase__ , 20 ) assert sepia.all() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def a ( ): '''simple docstring''' A_ : int = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 ) # Test for get_neighbors_pixel function() return not None A_ : str = 0 A_ : str = 0 A_ : Dict = image[x_coordinate][y_coordinate] A_ : Optional[Any] = lbp.get_neighbors_pixel( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image A_ : str = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert lbp_image.any()
667
1
'''simple docstring''' from importlib import import_module from .logging import get_logger lowerCamelCase :Dict = get_logger(__name__) class _lowerCAmelCase : def __init__(self , lowercase , lowercase=None ): A_ : Optional[int] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , lowercase , getattr(lowercase , lowercase ) ) A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Dict = [] def __init__(self , lowercase , lowercase , lowercase , lowercase=None ): A_ : Union[str, Any] = obj A_ : Optional[int] = target A_ : Optional[Any] = new A_ : Optional[Any] = target.split(""".""" )[0] A_ : Tuple = {} A_ : Optional[int] = attrs or [] def __enter__(self ): *A_, A_ : Optional[Any] = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase ) ): try: A_ : Any = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): A_ : int = getattr(self.obj , lowercase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): A_ : str = obj_attr # patch at top level setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) ) A_ : Optional[Any] = getattr(self.obj , lowercase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) ) A_ : Dict = getattr(lowercase , lowercase ) # finally set the target attribute setattr(lowercase , lowercase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , lowercase ) is attr_value: A_ : Dict = getattr(self.obj , lowercase ) setattr(self.obj , lowercase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" A_ : int = globals()["""__builtins__"""][target_attr] setattr(self.obj , lowercase , self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__(self , *lowercase ): for attr in list(self.original ): setattr(self.obj , lowercase , self.original.pop(lowercase ) ) def _a (self ): self.__enter__() self._active_patches.append(self ) def _a (self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
667
1
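The second class above is the patch_submodule context manager from the datasets library (the name is obfuscated in the dump, so treat it as an inference). A minimal sketch of what it enables; some_module and fake_join are hypothetical:

def fake_join(*parts):
    # stand-in replacement, visible only inside the patched block
    return "/".join(parts)

with patch_submodule(some_module, "os.path.join", fake_join):
    # some_module now sees fake_join, even if it did
    # `from os.path import join as pjoin` at import time
    some_module.do_something_with_paths()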
'''simple docstring'''

import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    '''simple docstring'''
    # add the neighbors (vertices are addressed by 1-based id):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
667
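Exercising the helpers above on a small weighted cycle; connect addresses vertices by 1-based id, and prim returns (child, parent) pairs of the minimum spanning tree:

vertices = [Vertex(i) for i in range(4)]
connect(vertices, 1, 2, 1)
connect(vertices, 2, 3, 2)
connect(vertices, 3, 4, 1)
connect(vertices, 1, 4, 4)            # heaviest edge; dropped from the MST
print(prim(vertices, vertices[0]))    # [(2, 1), (3, 2), (4, 3)]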
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[Any] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Any = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
1
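What the lazy import structure above ultimately exposes; a minimal CTC decoding sketch with the real upstream names, where the one-second silent clip is purely illustrative:

import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
inputs = processor(torch.zeros(16000).numpy(), sampling_rate=16000, return_tensors="pt")
logits = model(**inputs).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1)))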
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ): super().__init__() self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase ) # create a imagenet -> id dictionary for easier use A_ : str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): A_ : Optional[Any] = int(lowercase ) A_ : List[Any] = dict(sorted(self.labels.items() ) ) def _a (self , lowercase ): if not isinstance(lowercase , lowercase ): A_ : Optional[int] = list(lowercase ) for l in label: if l not in self.labels: raise ValueError( F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ): A_ : Tuple = len(lowercase ) A_ : Optional[Any] = self.transformer.config.sample_size A_ : int = self.transformer.config.in_channels A_ : Optional[int] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , ) A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 ) A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device ) A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: A_ : List[Any] = latent_model_input[: len(lowercase ) // 2] A_ : List[str] = torch.cat([half, half] , dim=0 ) A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase ) A_ : Tuple = t if not torch.is_tensor(lowercase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) A_ : Optional[Any] = latent_model_input.device.type == """mps""" if isinstance(lowercase , lowercase ): A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa else: A_ : List[Any] = torch.intaa if is_mps else torch.intaa A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: A_ : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A_ : int = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output A_ : List[Any] = self.transformer( lowercase , timestep=lowercase , class_labels=lowercase ).sample # perform guidance if guidance_scale > 1: A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 ) A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) A_ : str = torch.cat([half_eps, half_eps] , dim=0 ) A_ : Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: A_, A_ : int = torch.split(lowercase , lowercase , dim=1 ) else: A_ : Optional[int] = noise_pred # compute previous image: x_t -> x_t-1 A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample if guidance_scale > 1: A_, A_ : int = latent_model_input.chunk(2 , dim=0 ) else: A_ : Union[str, Any] = latent_model_input A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents A_ : List[Any] = self.vae.decode(lowercase ).sample A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ : int = self.numpy_to_pil(lowercase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowercase )
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
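As a quick sanity check, the segmented sieve can be compared against a plain sieve of Eratosthenes on a small bound. The helper below is written for this note (the name _naive_sieve is ours, not part of the module):

def _naive_sieve(n: int) -> list[int]:
    # Plain sieve of Eratosthenes, used only to cross-check `sieve` above.
    flags = [True] * (n + 1)
    primes = []
    for p in range(2, n + 1):
        if flags[p]:
            primes.append(p)
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return primes


assert sieve(1_000) == _naive_sieve(1_000)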
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn at random."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
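For intuition, the closed-form answer can be sanity-checked by simulation: draw 20 balls without replacement and average the number of distinct colours. This sketch is added for this note, and the helper name _simulate is ours:

import random


def _simulate(trials: int = 10_000, taken: int = 20) -> float:
    # One ball per (colour, copy) pair; colours are 0..NUM_COLOURS-1.
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total = 0
    for _ in range(trials):
        total += len(set(random.sample(balls, taken)))
    return total / trials


# _simulate() should land close to float(solution(20)), roughly 6.8.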
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : int = AutoConfig.from_pretrained(lowerCamelCase__ ) A_ : Any = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase__ ) A_ : Optional[Any] = checkpoints.load_tax_checkpoint(lowerCamelCase__ ) A_ : Optional[int] = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": A_ : Tuple = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": A_ : Optional[Any] = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": A_ : Optional[Any] = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`""" """ attribute with a value from ['local', 'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): A_ : Optional[int] = f'layers_{str(lowerCamelCase__ )}' # Self-Attention A_ : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] A_ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] A_ : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] A_ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": A_ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization A_ : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: A_ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] A_ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: A_ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] A_ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization A_ : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning A_ : Optional[int] = flax_model.params["""encoder"""]["""block"""][str(lowerCamelCase__ )]["""layer"""] A_ : Union[str, Any] = tax_attention_key A_ : str = tax_attention_out A_ : Optional[Any] = tax_attention_query A_ : List[str] = tax_attention_value A_ : str = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": A_ : Optional[Any] = tax_global_layer_norm if split_mlp_wi: A_ : Optional[int] = tax_mlp_wi_a A_ : Optional[int] = tax_mlp_wi_a else: A_ : Union[str, Any] = tax_mlp_wi A_ : List[Any] = tax_mlp_wo A_ : List[Any] = tax_mlp_layer_norm A_ : Union[str, Any] = flax_model_encoder_layer_block # Only for layer 0: A_ : Any = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T A_ : Optional[int] = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and 
config.encoder_attention_type == "transient-global": A_ : Tuple = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T A_ : Optional[int] = tax_encoder_global_rel_embedding # Assigning A_ : str = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] A_ : Union[str, Any] = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): A_ : Any = f'layers_{str(lowerCamelCase__ )}' # Self-Attention A_ : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] A_ : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] A_ : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] A_ : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization A_ : Dict = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention A_ : str = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] A_ : List[Any] = tax_enc_dec_attention_module["""key"""]["""kernel"""] A_ : Optional[int] = tax_enc_dec_attention_module["""out"""]["""kernel"""] A_ : int = tax_enc_dec_attention_module["""query"""]["""kernel"""] A_ : int = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization A_ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: A_ : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] A_ : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: A_ : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] A_ : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization A_ : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning A_ : Union[str, Any] = flax_model.params["""decoder"""]["""block"""][str(lowerCamelCase__ )]["""layer"""] A_ : Dict = tax_attention_key A_ : Dict = tax_attention_out A_ : Union[str, Any] = tax_attention_query A_ : int = tax_attention_value A_ : List[str] = tax_pre_attention_layer_norm A_ : Dict = tax_enc_dec_attention_key A_ : Optional[Any] = tax_enc_dec_attention_out A_ : Union[str, Any] = tax_enc_dec_attention_query A_ : Union[str, Any] = tax_enc_dec_attention_value A_ : str = tax_cross_layer_norm if split_mlp_wi: A_ : List[Any] = tax_mlp_wi_a A_ : Any = tax_mlp_wi_a else: A_ : Optional[Any] = tax_mlp_wi A_ : Any = tax_mlp_wo A_ : int = txa_mlp_layer_norm A_ : Dict = flax_model_decoder_layer_block # Decoder Normalization A_ : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] A_ : Optional[Any] = txa_decoder_norm # Only for layer 0: A_ : List[str] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T A_ : Tuple = tax_decoder_rel_embedding # Token Embeddings A_ : Optional[Any] = tax_model["""target"""]["""token_embedder"""]["""embedding"""] A_ : Optional[Any] = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: A_ : Dict = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(lowerCamelCase__ 
) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": lowerCamelCase :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCamelCase :List[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :List[Any] = logging.get_logger(__name__) lowerCamelCase :Union[str, Any] = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model' __SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ): A_ : Tuple = vocab_size A_ : str = hidden_size A_ : Optional[Any] = d_kv A_ : Tuple = d_ff A_ : str = num_layers A_ : int = num_heads A_ : Dict = relative_attention_num_buckets A_ : Optional[Any] = relative_attention_max_distance A_ : Dict = dropout_rate A_ : Optional[int] = layer_norm_epsilon A_ : Dict = initializer_factor A_ : Any = use_cache A_ : int = eos_token_id A_ : Tuple = decoder_start_token_id # for backwards compatibility A_ : str = dense_act_fn super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , ) @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model' def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ): super().__init__(**lowercase ) A_ : List[str] = hidden_size A_ : Optional[int] = patch_embed_hidden_size A_ : Any = d_ff A_ : str = dropout_rate A_ : Dict = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : List[Any] = initializer_range A_ : List[str] = initializer_factor A_ : Dict = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[Any] = dense_act_fn A_ : List[Any] = seq_len A_ : Tuple = relative_attention_num_buckets A_ : Any = relative_attention_max_distance A_ : int = d_kv @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : Tuple = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = 'pix2struct' __SCREAMING_SNAKE_CASE : List[Any] = True def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ): super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase ) if text_config is None: A_ : Optional[Any] = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) A_ : Tuple = PixaStructTextConfig(**lowercase ) A_ : List[str] = PixaStructVisionConfig(**lowercase ) A_ : Dict = self.text_config.decoder_start_token_id A_ : Union[str, Any] = self.text_config.pad_token_id A_ : str = self.text_config.eos_token_id A_ : List[str] = initializer_factor A_ : int = initializer_range A_ : Tuple = self.initializer_range A_ : Tuple = self.initializer_range A_ : List[str] = is_vqa @classmethod def _a (cls , lowercase , lowercase , **lowercase ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase ) def _a (self ): A_ : Optional[Any] = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : List[Any] = self.vision_config.to_dict() A_ : List[str] = self.__class__.model_type return output
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using python operators directly:
print((a / b) % p == (a * b ** (p - 2)) % p)
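One consequence worth spelling out: with a prime modulus, Fermat's little theorem turns this function into a modular-inverse routine, since b**(p-2) mod p is the inverse of b modulo p. A short illustrative check, added for this note:

# Illustrative: b^(p-2) mod p is b's multiplicative inverse mod p (p prime).
inverse_of_b = binary_exponentiation(b, p - 2, p)
assert (b * inverse_of_b) % p == 1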
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer'] __SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor' __SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__(self , lowercase=None , lowercase=None , **lowercase ): A_ : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase , ) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase , lowercase ) def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) A_ : Dict = features["""words"""] A_ : Optional[int] = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) # add pixel values A_ : List[Any] = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] ) A_ : Optional[int] = images return encoded_inputs def _a (self , lowercase , lowercase ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image A_ : str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase ) != 
len(lowercase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" F' {len(lowercase )} and {len(lowercase )}' ) return images_with_overflow def _a (self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property def _a (self ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _a (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , ) return self.image_processor_class @property def _a (self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , ) return self.image_processor
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b] in O(log n) via lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
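A compact usage sketch, added for this note. The assertions rest on the assignment semantics of update, which overwrites a range rather than adding to it:

demo = SegmentTree(5)
demo.build(1, 1, 5, [3, 1, 4, 1, 5])
assert demo.query(1, 1, 5, 2, 4) == 4  # max over positions 2..4
demo.update(1, 1, 5, 2, 4, 9)          # assign 9 to every position in [2, 4]
assert demo.query(1, 1, 5, 1, 5) == 9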
'''simple docstring''' from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCamelCase :Optional[int] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , **lowercase ): super().__init__(**lowercase ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) self.check_model_type(lowercase ) def _a (self , **lowercase ): A_ : str = {} A_ : Dict = {} A_ : str = {} # preprocess args if "points_per_batch" in kwargs: A_ : Dict = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: A_ : int = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: A_ : str = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: A_ : int = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: A_ : Any = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: A_ : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: A_ : Union[str, Any] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: A_ : List[str] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: A_ : List[Any] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ): return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase ) def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ): A_ : Tuple = load_image(lowercase ) A_ : int = self.image_processor.size["""longest_edge"""] A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": A_ : Optional[Any] = self.get_inference_context() with inference_context(): A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device ) A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) A_ : Tuple = image_embeddings A_ : Dict = grid_points.shape[1] A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0 , lowercase , lowercase ): A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :] A_ : List[Any] = input_labels[:, i : i + points_per_batch] A_ : Optional[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ): A_ : Any = model_inputs.pop("""input_boxes""" ) A_ : str = model_inputs.pop("""is_last""" ) A_ : int = model_inputs.pop("""original_sizes""" ).tolist() A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist() A_ : List[str] = self.model(**lowercase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks A_ : Optional[int] = model_outputs["""pred_masks"""] A_ : Tuple = self.image_processor.post_process_masks( lowercase , lowercase , lowercase , lowercase , binarize=lowercase ) A_ : Union[str, Any] = model_outputs["""iou_scores"""] A_, A_, A_ : Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ): A_ : Tuple = [] A_ : Optional[Any] = [] A_ : str = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) A_ : Any = torch.cat(lowercase ) A_ : List[Any] = torch.cat(lowercase ) A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation( lowercase , lowercase , lowercase , lowercase ) A_ : int = defaultdict(lowercase ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowercase ) A_ : Optional[int] = {} if output_rle_mask: A_ : List[str] = rle_mask if output_bboxes_mask: A_ : Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: Type[Formatter],
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
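A usage sketch for the registry, kept comment-only since it assumes the surrounding datasets package is importable:

# get_format_type_from_alias("pt")  -> "torch"
# get_formatter("numpy")            -> NumpyFormatter()
# get_formatter("torch")            -> TorchFormatter(), or raises the registered
#                                      ValueError when PyTorch is not installed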
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Modified Euler (Heun) method for the ODE y' = ode_func(x, y)."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: one plain Euler step; corrector: trapezoidal average of slopes.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
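A worked example, added for this note: integrating dy/dx = y from x = 0 with y(0) = 1 should approximate e at x = 1, which also pins down the argument order of euler_modified. The helper name _growth is ours:

import math


def _growth(x: float, y: float) -> float:
    # Right-hand side of dy/dx = y; the exact solution is y = e**x.
    return y


ys = euler_modified(_growth, 1.0, 0.0, 0.1, 1.0)
assert abs(ys[-1] - math.e) < 1e-2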
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ ) if matches: A_ : Optional[Any] = float(matches[1] ) A_ : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". A_ : Optional[Any] = 10_01 A_ : Union[str, Any] = """imagenet-1k-id2label.json""" A_ : List[str] = """huggingface/label-files""" A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()} A_ : int = """background""" A_ : List[str] = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def a ( ): '''simple docstring''' A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ ) # Load 🤗 model A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor A_ : Any = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" ) A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: A_ : Any = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) A_ : Union[str, Any] = """google/""" + model_name image_processor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase :str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX,
    CONFIG_NAME,
    DISABLE_TELEMETRY,
    DUMMY_INPUTS,
    DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES,
    ENV_VARS_TRUE_VALUES,
    FEATURE_EXTRACTOR_NAME,
    FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS,
    PYTORCH_PRETRAINED_BERT_CACHE,
    PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX,
    SENTENCEPIECE_UNDERLINE,
    SPIECE_UNDERLINE,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION,
    TRANSFORMERS_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    USE_JAX,
    USE_TF,
    USE_TORCH,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    ContextManagers,
    DummyObject,
    EntryNotFoundError,
    ExplicitEnum,
    ModelOutput,
    PaddingStrategy,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    TensorType,
    _LazyModule,
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    cached_property,
    copy_func,
    default_cache_path,
    define_sagemaker_information,
    get_cached_models,
    get_file_from_repo,
    get_full_repo_name,
    get_torch_version,
    has_file,
    http_user_agent,
    is_apex_available,
    is_bs4_available,
    is_coloredlogs_available,
    is_datasets_available,
    is_detectron2_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_in_notebook,
    is_ipex_available,
    is_librosa_available,
    is_offline_mode,
    is_onnx_available,
    is_pandas_available,
    is_phonemizer_available,
    is_protobuf_available,
    is_psutil_available,
    is_pyanvml_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_sklearn_available,
    is_soundfile_availble,
    is_spacy_available,
    is_speech_available,
    is_tensor,
    is_tensorflow_probability_available,
    is_tf2onnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bf16_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_tf32_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_training_run_on_sagemaker,
    is_vision_available,
    replace_return_docstrings,
    requires_backends,
    to_numpy,
    to_py_obj,
    torch_only_method,
)
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer' __SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer'] __SCREAMING_SNAKE_CASE : Tuple = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__(self , lowercase , lowercase=None ): super().__init__(lowercase ) A_ : Any = speaker_embeddings @classmethod def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: A_ : Any = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) A_ : str = None else: with open(lowercase ) as speaker_embeddings_json: A_ : List[str] = json.load(lowercase ) else: A_ : str = None A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase ) A_ : Optional[int] = {} A_ : Tuple = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": A_ : Union[str, Any] = self._load_voice_preset(lowercase ) A_ : Tuple = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , ) A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' ) A_ : str = tmp_dict with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def _a (self , lowercase = None , **lowercase ): A_ : List[Any] = self.speaker_embeddings[voice_preset] A_ : Optional[Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) A_ : int = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) A_ : Tuple = np.load(lowercase ) return voice_preset_dict def _a (self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): A_ : Optional[int] = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ): A_ : Optional[int] = voice_preset + """.npz""" A_ : Any = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase ) A_ : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: A_ : Union[str, Any] = voice_preset return encoded_text
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ): '''simple docstring''' model.train() A_ : int = model(lowerCamelCase__ ) A_ : str = F.mse_loss(lowerCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' set_seed(42 ) A_ : Tuple = RegressionModel() A_ : List[Any] = deepcopy(lowerCamelCase__ ) A_ : List[Any] = RegressionDataset(length=80 ) A_ : Optional[Any] = DataLoader(lowerCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: A_ : Any = AdamW(params=model.parameters() , lr=1E-3 ) A_ : Dict = AdamW(params=ddp_model.parameters() , lr=1E-3 ) A_ : int = LambdaLR(lowerCamelCase__ , lr_lambda=lambda lowerCamelCase__ : epoch**0.65 ) A_ : Dict = LambdaLR(lowerCamelCase__ , lr_lambda=lambda lowerCamelCase__ : epoch**0.65 ) # Make a copy of `model` if sched: A_, A_, A_, A_ : List[Any] = accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: A_, A_ : str = accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def a ( lowerCamelCase__ ): '''simple docstring''' A_, A_, A_ : Optional[int] = get_training_setup(lowerCamelCase__ ) # Use a single batch A_, A_ : List[str] = next(iter(lowerCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model A_, A_ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) A_, A_ : Any = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowerCamelCase__ ): step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: # Sync grads step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowerCamelCase__ , lowerCamelCase__ , 
lowerCamelCase__ , lowerCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) A_ : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase__ ) )] def a ( lowerCamelCase__ ): '''simple docstring''' A_, A_, A_ : Dict = get_training_setup(lowerCamelCase__ ) # Use a single batch A_, A_ : Tuple = next(iter(lowerCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model A_, A_ : str = accelerator.gather((ddp_input, ddp_target) ) A_, A_ : int = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowerCamelCase__ ): step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: # Sync grads step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) A_ : Optional[int] = ddp_input[torch.randperm(len(lowerCamelCase__ ) )] def a ( lowerCamelCase__=False , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[int] = Accelerator( split_batches=lowerCamelCase__ , dispatch_batches=lowerCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly A_, A_, A_ : Optional[Any] = get_training_setup(lowerCamelCase__ ) for iteration, batch in enumerate(lowerCamelCase__ ): A_, A_ : Any = batch.values() # Gather the distributed inputs and targs for the base model A_, A_ : Dict = accelerator.gather((ddp_input, ddp_target) ) A_, A_ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowerCamelCase__ ): step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be 
in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) A_ : Any = ddp_input[torch.randperm(len(lowerCamelCase__ ) )] GradientState._reset_state() def a ( lowerCamelCase__=False , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = Accelerator( split_batches=lowerCamelCase__ , dispatch_batches=lowerCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly A_, A_, A_, A_, A_, A_, A_ : Any = get_training_setup(lowerCamelCase__ , lowerCamelCase__ ) for iteration, batch in enumerate(lowerCamelCase__ ): A_, A_ : int = batch.values() # Gather the distributed inputs and targs for the base model A_, A_ : str = accelerator.gather((ddp_input, ddp_target) ) A_, A_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowerCamelCase__ ): step_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' A_ : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def a ( ): '''simple docstring''' A_ : Optional[Any] = Accelerator() A_ : Optional[Any] = RegressionDataset(length=80 ) A_ : Tuple = DataLoader(lowerCamelCase__ , batch_size=16 ) A_ : Union[str, Any] = RegressionDataset(length=96 ) A_ : Any = DataLoader(lowerCamelCase__ , batch_size=16 ) A_, A_ : str = accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowerCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase__ ) if iteration < len(lowerCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowerCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase__ ) if batch_num < len(lowerCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def a ( ): '''simple docstring''' A_ : List[Any] = Accelerator() A_ : Dict = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == 
DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(lowerCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(lowerCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowerCamelCase__ , lowerCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase__ , lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' main() if __name__ == "__main__": main()
667
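The tests above all verify the same contract: inside `Accelerator.accumulate` (or `no_sync`), gradients synchronize only on accumulation boundaries. A minimal sketch of that pattern with a toy model; the model, data, and loss here are placeholders, not the `RegressionModel` used above:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 1), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):  # skips gradient sync on non-boundary steps
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)       # scales the loss by the accumulation steps
        optimizer.step()                 # deferred until the boundary step
        optimizer.zero_grad()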
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Union[str, Any] = tempfile.mkdtemp() A_ : List[Any] = BlipImageProcessor() A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase ) processor.save_pretrained(self.tmpdirname ) def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer def _a (self ): shutil.rmtree(self.tmpdirname ) def _a (self ): A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a (self ): A_ : str = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 ) A_ : str = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase ) self.assertIsInstance(processor.qformer_tokenizer , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : List[str] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = self.prepare_image_inputs() A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" ) A_ : Dict = processor(images=lowercase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a (self ): A_ : List[Any] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : Any = self.get_qformer_tokenizer() A_ : List[str] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : str = """lower newer""" A_ : List[Any] = processor(text=lowercase ) A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase ) A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , 
encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def _a (self ): A_ : int = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Optional[int] = """lower newer""" A_ : Optional[int] = self.prepare_image_inputs() A_ : Tuple = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(lowercase ): processor() def _a (self ): A_ : Dict = self.get_image_processor() A_ : str = self.get_tokenizer() A_ : Optional[int] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : Optional[int] = processor.batch_decode(lowercase ) A_ : Dict = tokenizer.batch_decode(lowercase ) self.assertListEqual(lowercase , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Dict = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = """lower newer""" A_ : Optional[Any] = self.prepare_image_inputs() A_ : Any = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
667
1
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Any = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) A_ : Union[str, Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : str = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Union[str, Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = dct.pop(lowerCamelCase__ ) A_ : Optional[int] = val def a ( lowerCamelCase__ ): '''simple docstring''' if "handwritten" in checkpoint_url: A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : 
Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ ) A_ : int = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : List[str] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Union[str, Any] = 10_24 A_ : List[Any] = 40_96 A_ : Dict = 24 A_ : List[str] = 16 A_ : Union[str, Any] = 10_24 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Optional[Any] = False A_ : Union[str, Any] = """relu""" A_ : List[str] = 10_24 A_ : Tuple = True A_ : Tuple = False A_ : List[str] = False # load HuggingFace model A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ ) A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ ) A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() # load state_dict of original model, rename some keys A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""] A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ ) if key.startswith("""decoder""" ) and "output_projection" not in key: A_ : str = val else: A_ : List[str] = val # load state dict model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image A_ : str = ViTImageProcessor(size=encoder_config.image_size ) A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" ) A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values # verify logits A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ) A_ : Dict = outputs.logits A_ : str = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : Any = torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: A_ : List[Any] = torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, 
-0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase :Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
667
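Once converted, the checkpoint runs as a standard encoder-decoder generate loop. A sketch using the published hub id, which the script's conversions correspond to; "microsoft/trocr-base-handwritten" is assumed here rather than produced by the script:

import requests
import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # sample image from the script above
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])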
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase :Optional[Any] = logging.get_logger(__name__)

lowerCamelCase :Tuple = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}


class _lowerCAmelCase ( __UpperCAmelCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'

    def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
        super().__init__(**lowercase )
        A_ : int = image_size
        A_ : List[str] = patch_size
        A_ : Tuple = num_channels
        A_ : List[str] = max_token_length
        A_ : int = num_character_labels
        A_ : str = num_bpe_labels
        A_ : Tuple = num_wordpiece_labels
        A_ : Optional[int] = hidden_size
        A_ : List[Any] = num_hidden_layers
        A_ : int = num_attention_heads
        A_ : Tuple = mlp_ratio
        A_ : str = distilled
        A_ : Union[str, Any] = layer_norm_eps
        A_ : str = drop_rate
        A_ : int = qkv_bias
        A_ : Dict = attn_drop_rate
        A_ : List[Any] = drop_path_rate
        A_ : Any = output_aa_attentions
        A_ : Union[str, Any] = initializer_range
667
1
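For reference, a sketch of instantiating the configuration above; the class names (`MgpstrConfig`, `MgpstrForSceneTextRecognition`) are the public transformers names and are an assumption here, since this file only defines an obfuscated config class:

from transformers import MgpstrConfig, MgpstrForSceneTextRecognition

config = MgpstrConfig(max_token_length=27, num_character_labels=38)  # defaults shown above
model = MgpstrForSceneTextRecognition(config)
print(model.config.hidden_size)  # 768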
'''simple docstring''' import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def a ( lowerCamelCase__ ): '''simple docstring''' if "model" in orig_key: A_ : List[Any] = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: A_ : Optional[int] = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: A_ : Dict = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: A_ : List[str] = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: A_ : Any = orig_key.split(""".""" )[0].split("""_""" )[-1] A_ : Union[str, Any] = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' ) if "mha.attn" in orig_key: A_ : Dict = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: A_ : Optional[int] = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: A_ : Optional[int] = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: A_ : Tuple = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: A_ : Tuple = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: A_ : str = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: A_ : Dict = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: A_ : List[str] = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: A_ : int = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: A_ : int = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: A_ : List[str] = """yoso.""" + orig_key return orig_key def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A_ : Dict = orig_state_dict.pop(lowerCamelCase__ ) if ("pooler" in key) or ("sen_class" in key): continue else: A_ : Dict = val A_ : Optional[Any] = orig_state_dict["""cls.predictions.decoder.bias"""] A_ : str = torch.arange(lowerCamelCase__ ).expand((1, -1) ) + 2 return orig_state_dict def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model_state_dict"""] A_ : Any = YosoConfig.from_json_file(lowerCamelCase__ ) A_ : List[str] = YosoForMaskedLM(lowerCamelCase__ ) A_ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , lowerCamelCase__ ) print(model.load_state_dict(lowerCamelCase__ ) ) model.eval() model.save_pretrained(lowerCamelCase__ ) print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' ) if __name__ == "__main__": lowerCamelCase :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for YOSO model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCamelCase :Optional[int] = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
667
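A sketch of exercising a converted YOSO checkpoint on masked-LM, assuming the public hub id "uw-madison/yoso-4096" and whatever tokenizer it ships with; none of these names come from the conversion script itself:

import torch
from transformers import AutoTokenizer, YosoForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/yoso-4096")
model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096")

inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_positions].argmax(dim=-1)))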
'''simple docstring'''

import math
from collections.abc import Callable


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : float = xa
    A_ : float = xa
    while True:
        if x_n == x_na or function(lowerCamelCase__ ) == function(lowerCamelCase__ ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        A_ : float = x_na - (
            function(lowerCamelCase__ ) / ((function(lowerCamelCase__ ) - function(lowerCamelCase__ )) / (x_na - x_n))
        )
        if abs(x_na - x_na ) < 10**-5:
            return x_na
        A_ : Tuple = x_na
        A_ : List[Any] = x_na


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    return math.pow(lowerCamelCase__ , 3 ) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
667
1
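For a quick independent check of the secant iteration above: with f(x) = x^3 - 2x - 5 and starting points 3 and 3.5, the method converges to the single real root near 2.09455. A cleanly named restatement of the same update rule (the names are mine, not the file's):

def f(x: float) -> float:
    return x**3 - 2 * x - 5

def secant(func, x0: float, x1: float, tol: float = 1e-5) -> float:
    # assumes func(x0) != func(x1) at every step, as the file above also does
    while abs(x1 - x0) >= tol:
        x0, x1 = x1, x1 - func(x1) * (x1 - x0) / (func(x1) - func(x0))
    return x1

print(secant(f, 3.0, 3.5))  # ~2.0945515, the real root of x^3 - 2x - 5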
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCamelCase :List[str] = '''<<<<<<< This should probably be modified because it mentions: ''' lowerCamelCase :Optional[Any] = '''======= >>>>>>> ''' lowerCamelCase :Dict = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] lowerCamelCase :Any = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def a ( lowerCamelCase__ ): '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class _lowerCAmelCase ( __UpperCAmelCase ): @staticmethod def _a (lowercase ): A_ : List[Any] = parser.add_parser( """convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , ) train_parser.add_argument( """--tfds_path""" , type=lowercase , required=lowercase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , ) train_parser.add_argument( """--datasets_directory""" , type=lowercase , required=lowercase , help="""Path to the HuggingFace Datasets folder.""" ) train_parser.set_defaults(func=lowercase ) def __init__(self , lowercase , lowercase , *lowercase ): A_ : str = get_logger("""datasets-cli/converting""" ) A_ : Optional[Any] = tfds_path A_ : List[str] = datasets_directory def _a (self ): if os.path.isdir(self._tfds_path ): A_ : Dict = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): A_ : List[Any] = os.path.dirname(self._tfds_path ) else: raise ValueError("""--tfds_path is neither a directory nor a file. 
Please check path.""" ) A_ : Optional[int] = os.path.abspath(self._datasets_directory ) self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' ) A_ : Optional[int] = [] A_ : List[str] = [] A_ : Optional[Any] = {} if os.path.isdir(self._tfds_path ): A_ : List[str] = os.listdir(lowercase ) else: A_ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'Looking at file {f_name}' ) A_ : List[Any] = os.path.join(lowercase , lowercase ) A_ : List[str] = os.path.join(lowercase , lowercase ) if not os.path.isfile(lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("""Skipping file""" ) continue with open(lowercase , encoding="""utf-8""" ) as f: A_ : List[str] = f.readlines() A_ : List[Any] = [] A_ : List[Any] = False A_ : Optional[Any] = False A_ : Tuple = [] for line in lines: A_ : List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: A_ : List[Any] = """import datasets\n""" elif "import tensorflow" in out_line: # order is important here A_ : List[str] = """""" continue elif "from absl import logging" in out_line: A_ : List[Any] = """from datasets import logging\n""" elif "getLogger" in out_line: A_ : Tuple = out_line.replace("""getLogger""" , """get_logger""" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): A_ : str = True A_ : Tuple = list(filter(lambda lowercase : e in out_line , lowercase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase ) + """\n""" ) out_lines.append(lowercase ) out_lines.append(lowercase ) continue else: for pattern, replacement in TO_CONVERT: A_ : Optional[Any] = re.sub(lowercase , lowercase , lowercase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: A_ : List[Any] = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowercase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) ) A_ : str = """from . import """ + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'Error converting {out_line.strip()}' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: A_ : List[str] = True out_lines.append(lowercase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset A_ : List[str] = f_name.replace(""".py""" , """""" ) A_ : str = os.path.join(lowercase , lowercase ) A_ : List[str] = os.path.join(lowercase , lowercase ) os.makedirs(lowercase , exist_ok=lowercase ) self._logger.info(F'Adding directory {output_dir}' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowercase ) if needs_manual_update: with_manual_update.append(lowercase ) with open(lowercase , """w""" , encoding="""utf-8""" ) as f: f.writelines(lowercase ) self._logger.info(F'Converted in {output_file}' ) for utils_file in utils_files: try: A_ : Any = os.path.basename(lowercase ) A_ : Any = imports_to_builder_map[f_name.replace(""".py""" , """""" )] self._logger.info(F'Moving {dest_folder} to {utils_file}' ) shutil.copy(lowercase , lowercase ) except KeyError: self._logger.error(F'Cannot find destination folder for {utils_file}. 
Please copy manually.' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
667
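Grounded in the argparse wiring above, the converter is invoked through the datasets CLI roughly as follows; the paths are illustrative:

# shell usage, per the "convert" subcommand registered above:
#   datasets-cli convert --tfds_path ./my_tfds_dataset.py --datasets_directory ./converted/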
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase :Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ): super().__init__(**lowercase ) A_ : Dict = size if size is not None else {"""shortest_edge""": 224} A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" ) A_ : str = do_resize A_ : str = size A_ : List[str] = resample A_ : Any = do_center_crop A_ : Union[str, Any] = crop_size A_ : List[Any] = do_rescale A_ : List[Any] = rescale_factor A_ : Dict = do_normalize A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Union[str, Any] = do_convert_rgb def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : List[str] = do_resize if do_resize is not None else self.do_resize A_ : int = size if size is not None else self.size A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase ) A_ : int = resample if resample is not None else self.resample A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase ) A_ : str = do_rescale if do_rescale is not None else self.do_rescale A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : Any = image_mean if image_mean is not None else self.image_mean A_ : Any = image_std if image_std is not None else self.image_std A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : List[str] = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : int = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A_ : int = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase , tensor_type=lowercase )
667
1
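The preprocessing pipeline above is the usual CLIP-style chain: RGB convert, resize shortest edge, center crop, rescale by 1/255, then normalize. A sketch against the public CLIP image processor, which this class mirrors; "openai/clip-vit-base-patch32" is an assumed checkpoint, not referenced in the file:

import numpy as np
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # dummy HWC RGB image
batch = image_processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop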
'''simple docstring'''

def a ( lowerCamelCase__ ):
    '''simple docstring'''
    return str(lowerCamelCase__ ) == str(lowerCamelCase__ )[::-1]


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    return int(lowerCamelCase__ ) + int(str(lowerCamelCase__ )[::-1] )


def a ( lowerCamelCase__ = 1_00_00 ):
    '''simple docstring'''
    A_ : Optional[int] = []
    for num in range(1 , lowerCamelCase__ ):
        A_ : List[Any] = 0
        A_ : Any = num
        while iterations < 50:
            A_ : int = sum_reverse(lowerCamelCase__ )
            iterations += 1
            if is_palindrome(lowerCamelCase__ ):
                break
        else:
            lychrel_nums.append(lowerCamelCase__ )
    return len(lowerCamelCase__ )


if __name__ == "__main__":
    print(F"{solution() = }")
667
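A hand trace of the loop above for num = 349, which becomes a palindrome after three reverse-and-add steps and is therefore not counted as a Lychrel candidate:

n = 349
for step in range(1, 4):
    n = n + int(str(n)[::-1])  # the reverse-and-add step from sum_reverse above
    print(step, n)
# 1 1292
# 2 4213
# 3 7337  <- palindrome, so 349 drops out of the count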
'''simple docstring'''

class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase , lowercase ):
        A_ : List[str] = name
        A_ : Dict = value
        A_ : Optional[int] = weight

    def __repr__(self ):
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def _a (self ):
        return self.value

    def _a (self ):
        return self.name

    def _a (self ):
        return self.weight

    def _a (self ):
        return self.value / self.weight


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Optional[int] = []
    for i in range(len(lowerCamelCase__ ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ )
    A_ : Any = []
    A_, A_ : Tuple = 0.0, 0.0
    for i in range(len(lowerCamelCase__ ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def a ( ):
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
667
1
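The greedy routine above sorts by a key function and takes items while they still fit under the budget. A plainly named restatement of the same value-per-weight heuristic; the names and menu are mine:

items = [("burger", 80, 40), ("pizza", 100, 60), ("coke", 60, 10)]  # (name, value, weight)
max_cost = 60

ordered = sorted(items, key=lambda t: t[1] / t[2], reverse=True)  # by value/weight ratio
chosen, total_value, total_cost = [], 0, 0
for name, value, weight in ordered:
    if total_cost + weight <= max_cost:
        chosen.append(name)
        total_cost += weight
        total_value += value
print(chosen, total_value)  # ['coke', 'burger'] 140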
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def a ( lowerCamelCase__ ): '''simple docstring''' if ( (cp >= 0X4E_00 and cp <= 0X9F_FF) or (cp >= 0X34_00 and cp <= 0X4D_BF) # or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) # or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) # or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) # or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) # or (cp >= 0XF9_00 and cp <= 0XFA_FF) or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) # ): # return True return False def a ( lowerCamelCase__ ): '''simple docstring''' for char in word: A_ : List[str] = ord(lowerCamelCase__ ) if not _is_chinese_char(lowerCamelCase__ ): return 0 return 1 def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = set() for token in tokens: A_ : Any = len(lowerCamelCase__ ) > 1 and is_chinese(lowerCamelCase__ ) if chinese_word: word_set.add(lowerCamelCase__ ) A_ : List[Any] = list(lowerCamelCase__ ) return word_list def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if not chinese_word_set: return bert_tokens A_ : str = max([len(lowerCamelCase__ ) for w in chinese_word_set] ) A_ : Dict = bert_tokens A_, A_ : int = 0, len(lowerCamelCase__ ) while start < end: A_ : Optional[Any] = True if is_chinese(bert_word[start] ): A_ : int = min(end - start , lowerCamelCase__ ) for i in range(lowerCamelCase__ , 1 , -1 ): A_ : str = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): A_ : str = """##""" + bert_word[j] A_ : List[str] = start + i A_ : Union[str, Any] = False break if single_word: start += 1 return bert_word def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = [] for i in range(0 , len(lowerCamelCase__ ) , 1_00 ): A_ : Optional[Any] = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["""cws"""] ).cws A_ : Optional[int] = [get_chinese_word(lowerCamelCase__ ) for r in res] ltp_res.extend(lowerCamelCase__ ) assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) A_ : Optional[int] = [] for i in range(0 , len(lowerCamelCase__ ) , 1_00 ): A_ : Optional[int] = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=5_12 ) bert_res.extend(res["""input_ids"""] ) assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) A_ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase__ , lowerCamelCase__ ): A_ : str = [] for id in input_ids: A_ : List[str] = bert_tokenizer._convert_id_to_token(lowerCamelCase__ ) input_tokens.append(lowerCamelCase__ ) A_ : List[str] = add_sub_symbol(lowerCamelCase__ , lowerCamelCase__ ) A_ : Any = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase__ ): if token[:2] == "##": A_ : Tuple = token[2:] # save chinese tokens' pos if len(lowerCamelCase__ ) == 1 and _is_chinese_char(ord(lowerCamelCase__ ) ): ref_id.append(lowerCamelCase__ ) ref_ids.append(lowerCamelCase__ ) assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) return ref_ids def a ( lowerCamelCase__ ): '''simple docstring''' with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: A_ : Any = f.readlines() A_ : Any = [line.strip() for line in data if len(lowerCamelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : Dict = LTP(args.ltp ) # faster in GPU device A_ : Union[str, Any] = BertTokenizer.from_pretrained(args.bert ) A_ : Any = prepare_ref(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: A_ : List[str] = [json.dumps(lowerCamelCase__ ) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Any = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) lowerCamelCase :Optional[Any] = parser.parse_args() main(args)
667
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCamelCase :int = logging.getLogger(__name__) lowerCamelCase :List[Any] = 5_0 # max width of layer names lowerCamelCase :List[Any] = 7_0 # max width of quantizer names def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" ) group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" , action="""store_true""" , help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) , ) def a ( lowerCamelCase__ ): '''simple docstring''' if args.calibrator == "max": A_ : Union[str, Any] = """max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) A_ : int = """histogram""" elif args.calibrator == "mse": A_ : Dict = """histogram""" else: raise ValueError(f'Invalid calibrator {args.calibrator}' ) A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ ) A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ): '''simple docstring''' logger.info("""Configuring Model for Quantization""" ) logger.info(f'using quantization package {pytorch_quantization.__file__}' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ ) if args.quant_disable: set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCamelCase__ , 
args.quant_disable_keyword , _disabled=lowerCamelCase__ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ ) if args.recalibrate_weights: recalibrate_weights(lowerCamelCase__ ) if args.fuse_qkv: fuse_qkv(lowerCamelCase__ , lowerCamelCase__ ) if args.clip_gelu: clip_gelu(lowerCamelCase__ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'{name:80}: {module}' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): for mod in [qq, qk, qv]: if not hasattr(lowerCamelCase__ , """_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return A_ : List[Any] = qq._amax.detach().item() A_ : Optional[int] = qk._amax.detach().item() A_ : Dict = qv._amax.detach().item() A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) qq._amax.fill_(lowerCamelCase__ ) qk._amax.fill_(lowerCamelCase__ ) qv._amax.fill_(lowerCamelCase__ ) logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(f'FUSE_QKV: {name:{name_width}}' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ ) A_ : Dict = mod._input_quantizer._amax.data.detach().item() logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: A_ : Tuple = mod.weight.shape[0] A_ : Dict = mod._weight_quantizer._amax.detach() A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ): if not 
hasattr(mod.weight_quantizer , """_amax""" ): print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach() logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' ) A_ : str = amax def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ): '''simple docstring''' if ignore is None: A_ : int = [] elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = [ignore] A_ : Optional[Any] = 0 for name, mod in model.named_modules(): if not hasattr(lowerCamelCase__ , """weight""" ): continue A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) ) for name, mod in model.named_modules(): A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ ) A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ ) if not hasattr(lowerCamelCase__ , """weight""" ): continue if type(lowerCamelCase__ ) in ignore: continue if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]: continue A_ : Optional[int] = f'Act:{input_q.extra_repr()}' A_ : Dict = f'Wgt:{weight_q.extra_repr()}' A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}' if len(lowerCamelCase__ ) <= line_width: logger.info(lowerCamelCase__ ) else: logger.info(f'{name:{name_width}} {act_str}' ) logger.info(f'{" ":{name_width}} {wgt_str}' ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = 0 for name, mod in model.named_modules(): if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ): print(f'{name:80} {mod}' ) count += 1 print(f'{count} TensorQuantizers found in model' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if quantizer_mod is not None: assert hasattr(lowerCamelCase__ , lowerCamelCase__ ) setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: logger.warning(f'{name} has no {quantizer}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}' for k, v in kwargs.items(): s += f' {k}={v}' if which in ["input", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) if which in ["weight", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): A_ : Dict = f'Warning: 
changing {name:{name_width}}' for k, v in kwargs.items(): s += f' {k}={v}' setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ )
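# Illustrative sketch, not part of the original record: the calibration flow the
# helpers above implement. The upstream names enable_calibration() and
# finish_calibration() stand in for the obfuscated `def a` functions, and
# `model`, `args`, and `calib_loader` are hypothetical placeholders.
import torch

def calibrate_sketch(model, args, calib_loader):
    enable_calibration(model)        # TensorQuantizers collect statistics instead of quantizing
    with torch.no_grad():
        for batch in calib_loader:   # a few representative batches are enough
            model(**batch)
    finish_calibration(model, args)  # load amax (max or percentile) and re-enable quantization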
667
1
'''simple docstring''' class _lowerCAmelCase : def __init__(self ): A_ : List[Any] = """""" A_ : List[Any] = """""" A_ : Optional[Any] = [] def _a (self , lowercase , lowercase ): if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: A_ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: A_ : List[Any] = self.__min_dist_top_down_dp(lowercase , n - 1 ) A_ : List[str] = self.__min_dist_top_down_dp(m - 1 , lowercase ) A_ : Dict = self.__min_dist_top_down_dp(m - 1 , n - 1 ) A_ : Optional[int] = 1 + min(lowercase , lowercase , lowercase ) return self.dp[m][n] def _a (self , lowercase , lowercase ): A_ : Optional[Any] = worda A_ : Dict = worda A_ : Any = [[-1 for _ in range(len(lowercase ) )] for _ in range(len(lowercase ) )] return self.__min_dist_top_down_dp(len(lowercase ) - 1 , len(lowercase ) - 1 ) def _a (self , lowercase , lowercase ): A_ : Optional[Any] = worda A_ : Any = worda A_ : List[str] = len(lowercase ) A_ : int = len(lowercase ) A_ : Tuple = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty A_ : List[Any] = j elif j == 0: # second string is empty A_ : str = i elif worda[i - 1] == worda[j - 1]: # last characters are equal A_ : str = self.dp[i - 1][j - 1] else: A_ : int = self.dp[i][j - 1] A_ : Tuple = self.dp[i - 1][j] A_ : str = self.dp[i - 1][j - 1] A_ : List[str] = 1 + min(lowercase , lowercase , lowercase ) return self.dp[m][n] if __name__ == "__main__": lowerCamelCase :List[Any] = EditDistance() print('''****************** Testing Edit Distance DP Algorithm ******************''') print() lowerCamelCase :Dict = input('''Enter the first string: ''').strip() lowerCamelCase :Tuple = input('''Enter the second string: ''').strip() print() print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}") print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}") print() print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
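# Illustrative sketch: driving the solver above programmatically instead of via
# input(), assuming the upstream method names min_dist_top_down/min_dist_bottom_up
# that the __main__ block references (the class body shows them obfuscated to _a).
demo = EditDistance()
assert demo.min_dist_bottom_up("kitten", "sitting") == 3  # kitten -> sitten -> sittin -> sitting
assert demo.min_dist_top_down("kitten", "sitting") == 3   # the memoized variant agrees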
667
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : List[Any] = 0 @slow def _a (self ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(lowercase ) , 0 ) def _a (self ): A_ : str = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def _a (self ): A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) # Check that tokenizer_type ≠ model_type A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) @require_tokenizers def _a (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , 
tokenizer_type="""bert""" ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) ) A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" ) self.assertIsInstance(lowercase , lowercase ) def _a (self ): with pytest.raises(lowercase ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) if isinstance(lowercase , lowercase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase ) else: self.assertEqual(tokenizer.do_lower_case , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def _a (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def _a (self ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai A_ : List[str] = TOKENIZER_MAPPING.values() A_ : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(lowercase ) @require_tokenizers def _a (self ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase ) @require_tokenizers def _a (self ): A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase ) A_ : List[Any] = """Hello, world. 
How are you?""" A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase ) A_ : List[Any] = tokenizer.tokenize(lowercase ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def _a (self ): A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(lowercase ) , lowercase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def _a (self ): A_ : Any = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Tuple = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def _a (self ): A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(lowercase , lowercase ) def _a (self ): # Check we can load the tokenizer config of an online model. A_ : Tuple = get_tokenizer_config("""bert-base-cased""" ) A_ : Any = config.pop("""_commit_hash""" , lowercase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(lowercase , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. A_ : List[Any] = get_tokenizer_config(lowercase ) self.assertDictEqual(lowercase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. A_ : int = AutoTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Dict = get_tokenizer_config(lowercase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) A_ : Tuple = CustomTokenizer.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _a (self ): try: AutoConfig.register("""custom""" , lowercase ) # Can register in two steps AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: A_ : str = BertTokenizerFast.from_pretrained(lowercase ) bert_tokenizer.save_pretrained(lowercase ) A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase ): A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase ): A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : str = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowercase ) A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def _a (self ): class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = False class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = NewTokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = False try: AutoConfig.register("""custom""" , lowercase ) AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase ) AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase ) # If remote code is not set, the default is to use local A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
A_ : int = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) A_ : List[Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub A_ : Any = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a (self ): A_ : Dict = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version A_ : Optional[int] = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def _a (self ): with self.assertRaisesRegex( lowercase , """bert-base is not a local folder and is not a valid model identifier""" ): A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" ) def _a (self ): with self.assertRaisesRegex( lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" ) def _a (self ): # Make sure we have cached the tokenizer. A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
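# Illustrative sketch of the registration pattern these tests exercise: pair a
# config class with tokenizer classes so AutoTokenizer can resolve them. The
# CustomConfig/CustomTokenizer fixtures and the temp path are assumptions here.
from transformers import AutoConfig, AutoTokenizer

AutoConfig.register("custom", CustomConfig)                  # model_type -> config class
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
tok = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)  # any checkpoint with a vocab works
tok.save_pretrained("/tmp/custom_tok")                       # hypothetical path
reloaded = AutoTokenizer.from_pretrained("/tmp/custom_tok")  # resolves back to CustomTokenizer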
667
1
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def a ( lowerCamelCase__=32 , lowerCamelCase__=10 , lowerCamelCase__=1_00 , lowerCamelCase__=10_26 , lowerCamelCase__=True , lowerCamelCase__="data/tokenized_stories_train_wikitext103.jbl" , lowerCamelCase__="igf_context_pairs.jbl" , ): '''simple docstring''' set_seed(3 ) # generate train_data and objective_set A_, A_ : Any = generate_datasets( lowerCamelCase__ , lowerCamelCase__ , number=lowerCamelCase__ , min_len=10_26 , trim=lowerCamelCase__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? A_ : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model A_ : Any = load_gpta("""gpt2""" ).to(lowerCamelCase__ ) print("""computing perplexity on objective set""" ) A_ : Tuple = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).item() print("""perplexity on objective set:""" , lowerCamelCase__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def a ( lowerCamelCase__ , lowerCamelCase__=15 , lowerCamelCase__=1_28 , lowerCamelCase__=1_00 , lowerCamelCase__="igf_model.pt" , ): '''simple docstring''' set_seed(42 ) # Load pre-trained model A_ : Union[str, Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model A_ : Tuple = SecondaryLearner(lowerCamelCase__ ) # Train secondary learner A_ : int = train_secondary_learner( lowerCamelCase__ , lowerCamelCase__ , max_epochs=lowerCamelCase__ , batch_size=lowerCamelCase__ , eval_freq=1_00 , igf_model_path=lowerCamelCase__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=32 , lowerCamelCase__=10_00 , lowerCamelCase__=16 , lowerCamelCase__=1.0 , lowerCamelCase__=recopy_gpta , lowerCamelCase__=None , lowerCamelCase__=10 , lowerCamelCase__="gpt2_finetuned.pt" , ): '''simple docstring''' A_ : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) A_ : List[Any] = RandomSampler(lowerCamelCase__ ) A_ : Dict = DataLoader(lowerCamelCase__ , sampler=lowerCamelCase__ ) A_ : Union[str, Any] = max_steps // (len(lowerCamelCase__ )) + 1 A_ : Optional[Any] = 0 A_ : str = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCamelCase__ ) A_, A_, A_ : Dict = recopy_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.train() if secondary_learner is not None: secondary_learner.to(lowerCamelCase__ ) secondary_learner.eval() A_ : Any = [] A_ : List[str] = 0 A_ : Any = [] A_ : Any = [] # Compute the performance of the transformer model at the beginning A_ : Dict = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) test_perps.append(lowerCamelCase__ ) print("""Test perplexity, step""" , 
lowerCamelCase__ , """:""" , lowerCamelCase__ ) for epoch in range(int(lowerCamelCase__ ) ): for step, example in enumerate(lowerCamelCase__ ): torch.cuda.empty_cache() A_ : Dict = random.randint(0 , example.size(2 ) - context_len - 1 ) A_ : Tuple = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() A_ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) A_ : Any = True if secondary_learner is not None: A_ : Dict = secondary_learner.forward( torch.tensor(lowerCamelCase__ , dtype=torch.long , device=lowerCamelCase__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(lowerCamelCase__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: A_ : Optional[Any] = -1 if predicted_q < threshold: A_ : Optional[int] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) A_ : List[Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() A_ : Dict = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: A_ : List[str] = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) test_perps.append(lowerCamelCase__ ) print("""Test perplexity, step""" , lowerCamelCase__ , """:""" , lowerCamelCase__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , lowerCamelCase__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def a ( ): '''simple docstring''' A_ : Optional[int] = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The input data dir. 
Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=lowerCamelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=1_00 , type=lowerCamelCase__ , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=1_00 , type=lowerCamelCase__ , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=10_00 , type=lowerCamelCase__ , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=1_28 , type=lowerCamelCase__ , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=lowerCamelCase__ , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=lowerCamelCase__ , help=( """decay the selectivity of our secondary learner filter from """ """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=1_00 , type=lowerCamelCase__ , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=10_26 , type=lowerCamelCase__ , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=lowerCamelCase__ , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=lowerCamelCase__ , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=lowerCamelCase__ , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Reset the model to the 
original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=lowerCamelCase__ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner A_ : int = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner A_ : Tuple = training_secondary_learner( lowerCamelCase__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model A_ : Tuple = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model A_, A_ : int = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_00 , min_len=10_26 , trim=lowerCamelCase__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=lowerCamelCase__ , secondary_learner=lowerCamelCase__ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
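# Illustrative sketch of the filtering rule inside the fine-tuning loop above:
# a context is backpropagated only while the secondary learner's predicted
# information gain clears the threshold, and the threshold is dropped to -1
# after ten batches. The predicted values here are made up for the demo.
threshold = 1.0
predicted_igs = [1.4, 0.8, 1.2, 0.9, 1.6, 0.7, 1.1, 1.3, 0.5, 1.0, 0.2]
kept_steps = []
for global_step, predicted_q in enumerate(predicted_igs, start=1):
    if global_step == 10:
        threshold = -1.0              # relax the filter, mirroring the loop above
    if predicted_q >= threshold:      # the loop above skips backprop when predicted_q < threshold
        kept_steps.append(global_step)
print(kept_steps)  # [1, 3, 5, 7, 8, 10, 11]: strict filter first, relaxed filter from step 10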
667
'''simple docstring''' from __future__ import annotations def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if partitions <= 0: raise ValueError("""partitions must be a positive number!""" ) if partitions > number_of_bytes: raise ValueError("""partitions can not be greater than number_of_bytes!""" ) A_ : int = number_of_bytes // partitions A_ : Union[str, Any] = [] for i in range(lowerCamelCase__ ): A_ : Dict = i * bytes_per_partition + 1 A_ : Tuple = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
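# Worked example for the partitioner above, assuming the upstream signature
# allocation_num(number_of_bytes, partitions) behind the obfuscated `def a`:
# 100 bytes over 3 partitions yields two 33-byte ranges plus a final range
# that absorbs the remainder.
assert allocation_num(100, 3) == ["1-33", "34-66", "67-100"]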
667
1
'''simple docstring''' from ...processing_utils import ProcessorMixin class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = ['image_processor', 'feature_extractor'] __SCREAMING_SNAKE_CASE : List[Any] = 'TvltImageProcessor' __SCREAMING_SNAKE_CASE : Union[str, Any] = 'TvltFeatureExtractor' def __init__(self , lowercase , lowercase ): super().__init__(image_processor=lowercase , feature_extractor=lowercase ) A_ : Union[str, Any] = image_processor A_ : Tuple = feature_extractor def __call__(self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=False , lowercase=False , *lowercase , **lowercase , ): if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) A_ : List[str] = None if images is not None: A_ : int = self.image_processor(lowercase , mask_pixel=lowercase , *lowercase , **lowercase ) if images_mixed is not None: A_ : str = self.image_processor(lowercase , is_mixed=lowercase , *lowercase , **lowercase ) if audio is not None: A_ : Optional[int] = self.feature_extractor( lowercase , *lowercase , sampling_rate=lowercase , mask_audio=lowercase , **lowercase ) A_ : List[str] = {} if audio is not None: output_dict.update(lowercase ) if images is not None: output_dict.update(lowercase ) if images_mixed_dict is not None: output_dict.update(lowercase ) return output_dict @property def _a (self ): A_ : List[str] = self.image_processor.model_input_names A_ : Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
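# Illustrative usage sketch for the processor above, following the upstream
# TvltProcessor keyword names (the obfuscated __call__ signature collapses them);
# the array shapes, the 44.1 kHz rate, and the pre-built `image_processor` /
# `feature_extractor` components are assumptions for the example only.
import numpy as np

processor = TvltProcessor(image_processor, feature_extractor)
video = [np.random.rand(8, 3, 224, 224)]  # hypothetical clip: 8 frames of 3x224x224
audio = [np.random.rand(10_000)]          # hypothetical raw waveform
batch = processor(images=video, audio=audio, sampling_rate=44_100)
print(sorted(batch.keys()))               # merged image- and audio-feature dict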
667
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Any = logging.get_logger(__name__) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) A_ : Union[str, Any] = in_proj_weight[ : encoder_config.hidden_size, : ] A_ : str = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A_ : Union[str, Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : str = dct.pop(lowerCamelCase__ ) A_ : Optional[int] = val def a ( lowerCamelCase__ ): '''simple docstring''' if "handwritten" in checkpoint_url: A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : 
Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ ) A_ : int = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A_ : List[str] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder A_ : Union[str, Any] = 10_24 A_ : List[Any] = 40_96 A_ : Dict = 24 A_ : List[str] = 16 A_ : Union[str, Any] = 10_24 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A_ : Optional[Any] = False A_ : Union[str, Any] = """relu""" A_ : List[str] = 10_24 A_ : Tuple = True A_ : Tuple = False A_ : List[str] = False # load HuggingFace model A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ ) A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ ) A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() # load state_dict of original model, rename some keys A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""] A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ ) if key.startswith("""decoder""" ) and "output_projection" not in key: A_ : str = val else: A_ : List[str] = val # load state dict model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image A_ : str = ViTImageProcessor(size=encoder_config.image_size ) A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" ) A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values # verify logits A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ) A_ : Dict = outputs.logits A_ : str = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: A_ : Optional[int] = torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: A_ : Any = torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: A_ : List[Any] = torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: A_ : Optional[Any] = torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, 
-0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase :Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
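# Illustrative sketch of what read_in_q_k_v above does for a single layer: the
# fused qkv projection weight of shape (3*hidden, hidden) is sliced into query,
# key and value blocks; hidden=4 here just keeps the demo hand-checkable.
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]              # rows 0..hidden-1  -> query
k_w = in_proj_weight[hidden : 2 * hidden, :]  # next hidden rows  -> key
v_w = in_proj_weight[-hidden:, :]             # last hidden rows  -> value
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)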
667
1
'''simple docstring''' from math import factorial, radians def a ( lowerCamelCase__ , lowerCamelCase__ = 18 , lowerCamelCase__ = 10 ): '''simple docstring''' A_ : Dict = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians A_ : Optional[Any] = radians(lowerCamelCase__ ) A_ : List[Any] = angle_in_radians A_ : Dict = 3 A_ : Optional[int] = -1 for _ in range(lowerCamelCase__ ): result += (b * (angle_in_radians**a)) / factorial(lowerCamelCase__ ) A_ : List[str] = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": __import__('''doctest''').testmod()
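# Worked example for the Maclaurin expansion above, assuming the upstream
# implementation behind the obfuscation (entry point sin(angle_in_degrees,
# accuracy=18, rounded_values_count=10), here obfuscated to `a`): sin(90°)
# evaluates to 1.0, and 450° reduces modulo 360 to the same value.
print(sin(90))   # expected: 1.0
print(sin(450))  # expected: 1.0, since 450 - 360 = 90 before the series is summed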
667
'''simple docstring''' print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
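# How the quine above works, shown in isolation: %r substitutes the string's
# own repr() into itself and %% collapses to a literal %, so formatting the
# template with itself reproduces the source. A reduced demo:
template = 'the repr of this string is %r and %% prints as one percent sign'
print(template % template)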
667
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase :Union[str, Any] = { '''configuration_mask2former''': [ '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Mask2FormerConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Union[str, Any] = ['''Mask2FormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[str] = [ '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Mask2FormerForUniversalSegmentation''', '''Mask2FormerModel''', '''Mask2FormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys lowerCamelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
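# Illustrative sketch of the lazy-import pattern the __init__ above relies on:
# a simplified stand-in for transformers' _LazyModule, not the real class.
# Attribute access triggers the submodule import, so importing the package stays cheap.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):  # only called when normal attribute lookup fails
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)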
667
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY) def a ( ): '''simple docstring''' A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ ) # assert negative_img array for at least one True assert negative_img.any() def a ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def a ( ): '''simple docstring''' A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a ( ): '''simple docstring''' A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() A_ : List[Any] = canny.canny(lowerCamelCase__ ) # assert canny array for at least one True assert canny_array.any() def a ( ): '''simple docstring''' assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all() def a ( ): '''simple docstring''' A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ ) assert res.any() def a ( ): '''simple docstring''' assert med.median_filter(lowerCamelCase__ , 3 ).any() def a ( ): '''simple docstring''' A_, A_ : int = sob.sobel_filter(lowerCamelCase__ ) assert grad.any() and theta.any() def a ( ): '''simple docstring''' A_ : int = sp.make_sepia(lowerCamelCase__ , 20 ) assert sepia.all() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def a ( ): '''simple docstring''' A_ : int = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 ) # Test that get_neighbors_pixel() does not return None A_ : str = 0 A_ : str = 0 A_ : Dict = image[x_coordinate][y_coordinate] A_ : Optional[Any] = lbp.get_neighbors_pixel( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert neighbors_pixels is not None # Test the local_binary_pattern() function # Create a numpy array with the same height and width as the read image A_ : str = np.zeros((image.shape[0], image.shape[1]) ) # Iterate through the image and calculate the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) assert lbp_image.any()
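# Illustrative hand-checkable sketch of a local binary pattern like the one the
# test above exercises; the neighbour ordering and the >= convention are
# assumptions for the demo, not necessarily those of the lbp module.
import numpy as np

patch = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
center = patch[1, 1]  # 50
neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
             patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
lbp_value = sum(int(n >= center) << i for i, n in enumerate(neighbors))
print(lbp_value)  # 8 + 16 + 32 + 64 = 120 under these conventions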
667
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'philschmid/bart-large-cnn-samsum' __SCREAMING_SNAKE_CASE : Tuple = ( 'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ' 'and returns a summary of the text.' ) __SCREAMING_SNAKE_CASE : Any = 'summarizer' __SCREAMING_SNAKE_CASE : int = AutoTokenizer __SCREAMING_SNAKE_CASE : Tuple = AutoModelForSeqaSeqLM __SCREAMING_SNAKE_CASE : List[str] = ['text'] __SCREAMING_SNAKE_CASE : Optional[int] = ['text'] def _a (self , lowercase ): return self.pre_processor(lowercase , return_tensors="""pt""" , truncation=lowercase ) def _a (self , lowercase ): return self.model.generate(**lowercase )[0] def _a (self , lowercase ): return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
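# Illustrative usage sketch for the tool above; TextSummarizationTool is the
# upstream name for the obfuscated class, and instantiating it downloads the
# philschmid/bart-large-cnn-samsum checkpoint, so treat this as pseudocode
# unless the weights are available. PipelineTool instances are callable: the
# inherited __call__ chains encode -> forward -> decode.
tool = TextSummarizationTool()
dialogue = "Alice: Are we still on for lunch? Bob: Yes, noon at the usual place."
print(tool(dialogue))  # a short summary of the dialogue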
667
'''simple docstring''' from importlib import import_module from .logging import get_logger lowerCamelCase :Dict = get_logger(__name__) class _lowerCAmelCase : def __init__(self , lowercase , lowercase=None ): A_ : Optional[int] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , lowercase , getattr(lowercase , lowercase ) ) A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module class _lowerCAmelCase : __SCREAMING_SNAKE_CASE : Dict = [] def __init__(self , lowercase , lowercase , lowercase , lowercase=None ): A_ : Union[str, Any] = obj A_ : Optional[int] = target A_ : Optional[Any] = new A_ : Optional[Any] = target.split(""".""" )[0] A_ : Tuple = {} A_ : Optional[int] = attrs or [] def __enter__(self ): *A_, A_ : Optional[Any] = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase ) ): try: A_ : Any = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): A_ : int = getattr(self.obj , lowercase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows patching renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): A_ : str = obj_attr # patch at top level setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) ) A_ : Optional[Any] = getattr(self.obj , lowercase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) ) A_ : Dict = getattr(lowercase , lowercase ) # finally set the target attribute setattr(lowercase , lowercase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows patching renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , lowercase ) is attr_value: A_ : Dict = getattr(self.obj , lowercase ) setattr(self.obj , lowercase , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" A_ : int = globals()["""__builtins__"""][target_attr] setattr(self.obj , lowercase , self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__(self , *lowercase ): for attr in list(self.original ): setattr(self.obj , lowercase , self.original.pop(lowercase ) ) def _a (self ): self.__enter__() self._active_patches.append(self ) def _a (self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
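# Illustrative sketch of the patcher in action, mirroring how datasets uses it
# in tests; patch_submodule is the upstream name for the second (obfuscated)
# class above, and mock_join is hypothetical. Inside the context, module-level
# lookups of `os` resolve to a _PatchedModuleObj whose path.join is the mock.
import os
import sys

def mock_join(*parts):
    return "/".join(parts)

this_module = sys.modules[__name__]
with patch_submodule(this_module, "os.path.join", mock_join):
    assert os.path.join("a", "b") == "a/b"                 # patched
assert os.path.join("a", "b") == os.sep.join(("a", "b"))   # restored on exit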
667
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Tuple = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Dict = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Union[str, Any] = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, 
FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys lowerCamelCase :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase :int = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Optional[int] = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :List[Any] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Any = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor lowerCamelCase :str = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , *lowercase , **lowercase ): warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , lowercase , ) super().__init__(*lowercase , **lowercase )
667
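The Beit row is a pure deprecation shim: the old class subclasses its replacement and warns once on construction (the obfuscated `lowercase` passed to `warnings.warn` stands for `FutureWarning` in the de-obfuscated source). A self-contained sketch of the pattern with hypothetical class names:

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept for backwards compatibility (hypothetical names)."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

# OldFeatureExtractor(size=192) behaves like NewImageProcessor but emits a FutureWarning.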
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ): super().__init__() self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase ) # create a imagenet -> id dictionary for easier use A_ : str = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(""",""" ): A_ : Optional[Any] = int(lowercase ) A_ : List[Any] = dict(sorted(self.labels.items() ) ) def _a (self , lowercase ): if not isinstance(lowercase , lowercase ): A_ : Optional[int] = list(lowercase ) for l in label: if l not in self.labels: raise ValueError( F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ): A_ : Tuple = len(lowercase ) A_ : Optional[Any] = self.transformer.config.sample_size A_ : int = self.transformer.config.in_channels A_ : Optional[int] = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , ) A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 ) A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device ) A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: A_ : List[Any] = latent_model_input[: len(lowercase ) // 2] A_ : List[str] = torch.cat([half, half] , dim=0 ) A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase ) A_ : Tuple = t if not torch.is_tensor(lowercase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) A_ : Optional[Any] = latent_model_input.device.type == """mps""" if isinstance(lowercase , lowercase ): A_ : Optional[Any] = torch.float32 if is_mps else torch.float64 else: A_ : List[Any] = torch.int32 if is_mps else torch.int64 A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: A_ : List[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A_ : int = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output A_ : List[Any] = self.transformer( lowercase , timestep=lowercase , class_labels=lowercase ).sample # perform guidance if guidance_scale > 1: A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 ) A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps) A_ : str = torch.cat([half_eps, half_eps] , dim=0 ) A_ : Optional[int] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: A_, A_ : int = torch.split(lowercase , lowercase , dim=1 ) else: A_ : Optional[int] = noise_pred # compute previous image: x_t -> x_t-1 A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample if guidance_scale > 1: A_, A_ : int = latent_model_input.chunk(2 , dim=0 ) else: A_ : Union[str, Any] = latent_model_input A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents A_ : List[Any] = self.vae.decode(lowercase ).sample A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ : int = self.numpy_to_pil(lowercase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowercase )
667
1
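The pipeline above runs the conditional and unconditional branches in one doubled batch and recombines the two noise predictions. A small sketch of just that recombination in plain torch (assuming torch is installed; shapes are toy):

import torch

guidance_scale = 4.0
batch = 2

# one doubled batch: first half conditional, second half unconditional
noise_pred = torch.randn(2 * batch, 4, 8, 8)
cond_eps, uncond_eps = noise_pred[:batch], noise_pred[batch:]

# classifier-free guidance: push the prediction away from the unconditional one
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
eps = torch.cat([half_eps, half_eps], dim=0)  # duplicated so shapes still match the doubled batch

assert eps.shape == noise_pred.shape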
'''simple docstring''' def search ( list_data , key , left = 0 , right = 0 ): '''simple docstring''' right = right or len(list_data ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(list_data , key , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
667
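The row above is a linear search that probes both ends on each call, so its recursion depth is about n/2. An iterative rendition with a couple of checks; the name `two_end_search` is mine:

def two_end_search(data, key):
    """Linear search probing both ends, mirroring the recursive row above."""
    left, right = 0, len(data) - 1
    while left <= right:
        if data[left] == key:
            return left
        if data[right] == key:
            return right
        left, right = left + 1, right - 1
    return -1

assert two_end_search([4, 8, 15, 16, 23, 42], 23) == 4
assert two_end_search([4, 8, 15, 16, 23, 42], 7) == -1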
'''simple docstring''' import math BALLS_PER_COLOUR = 1_0 NUM_COLOURS = 7 NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS def solution ( taken = 20 ): '''simple docstring''' total = math.comb(NUM_BALLS , taken ) missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , taken ) result = NUM_COLOURS * (1 - missing_colour / total) return f'{result:.9f}' if __name__ == "__main__": print(solution(2_0))
667
1
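The row computes the expected number of distinct colours when drawing 20 of 70 balls (7 colours, 10 balls each): by linearity of expectation, E = 7 * (1 - C(60, 20) / C(70, 20)). A quick standalone check, printed to nine places to match the row's formatting:

import math

taken, per_colour, colours = 20, 10, 7
total_balls = per_colour * colours  # 70

# P(a given colour is absent from the draw) = C(60, 20) / C(70, 20)
p_missing = math.comb(total_balls - per_colour, taken) / math.comb(total_balls, taken)
expected_colours = colours * (1 - p_missing)  # linearity of expectation over 7 colours

print(f"{expected_colours:.9f}")  # should print roughly 6.818741802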
'''simple docstring''' import math import random def sigmoid_function ( value , deriv = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value INITIAL_VALUE = 0.02 def forward_propagation ( expected , number_propagations ): '''simple docstring''' weight = float(2 * (random.randint(1 , 1_00 )) - 1 ) for _ in range(number_propagations ): # Forward propagation layer_a = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? layer_1_error = (expected / 1_00) - layer_a # Error delta layer_1_delta = layer_1_error * sigmoid_function(layer_a , True ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() expected = int(input('''Expected value: ''')) number_propagations = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
667
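The trainer above leans on the identity sigma'(x) = sigma(x) * (1 - sigma(x)), which is what the `deriv=True` branch computes on an already-activated value. A finite-difference sanity check of that identity:

import math

def sigmoid(x):
    return 1 / (1 + math.exp(-x))

x = 0.7
h = 1e-6
numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)   # central difference
analytic = sigmoid(x) * (1 - sigmoid(x))                # sigma'(x) = s * (1 - s)

assert abs(numeric - analytic) < 1e-8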
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase :List[Any] = logging.get_logger(__name__) lowerCamelCase :Union[str, Any] = { '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model' __SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values'] __SCREAMING_SNAKE_CASE : List[Any] = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ): A_ : Tuple = vocab_size A_ : str = hidden_size A_ : Optional[Any] = d_kv A_ : Tuple = d_ff A_ : str = num_layers A_ : int = num_heads A_ : Dict = relative_attention_num_buckets A_ : Optional[Any] = relative_attention_max_distance A_ : Dict = dropout_rate A_ : Optional[int] = layer_norm_epsilon A_ : Dict = initializer_factor A_ : Any = use_cache A_ : int = eos_token_id A_ : Tuple = decoder_start_token_id # for backwards compatibility A_ : str = dense_act_fn super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , ) @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model' def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ): super().__init__(**lowercase ) A_ : List[str] = hidden_size A_ : Optional[int] = patch_embed_hidden_size A_ : Any = d_ff A_ : str = dropout_rate A_ : Dict = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : List[Any] = initializer_range A_ : List[str] = initializer_factor A_ : Dict = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[Any] = dense_act_fn A_ : List[Any] = seq_len A_ : Tuple = relative_attention_num_buckets A_ : Any = relative_attention_max_distance A_ : int = d_kv @classmethod def _a (cls , lowercase , **lowercase ): cls._set_token_in_kwargs(lowercase ) A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": A_ : Tuple = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowercase , **lowercase ) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Any = 'pix2struct' __SCREAMING_SNAKE_CASE : List[Any] = True def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ): super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase ) if text_config is None: A_ : Optional[Any] = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) A_ : Tuple = PixaStructTextConfig(**lowercase ) A_ : List[str] = PixaStructVisionConfig(**lowercase ) A_ : Dict = self.text_config.decoder_start_token_id A_ : Union[str, Any] = self.text_config.pad_token_id A_ : str = self.text_config.eos_token_id A_ : List[str] = initializer_factor A_ : int = initializer_range A_ : Tuple = self.initializer_range A_ : Tuple = self.initializer_range A_ : List[str] = is_vqa @classmethod def _a (cls , lowercase , lowercase , **lowercase ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase ) def _a (self ): A_ : Optional[Any] = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : List[Any] = self.vision_config.to_dict() A_ : List[str] = self.__class__.model_type return output
667
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = LongformerTokenizer __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : str = LongformerTokenizerFast __SCREAMING_SNAKE_CASE : int = True def _a (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : int = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] A_ : Union[str, Any] = dict(zip(lowercase , range(len(lowercase ) ) ) ) A_ : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] A_ : Dict = {"""unk_token""": """<unk>"""} A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowercase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowercase ) ) def _a (self , **lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def _a (self , **lowercase ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def _a (self , lowercase ): A_ : Any = """lower newer""" A_ : List[Any] = """lower newer""" return input_text, output_text def _a (self ): A_ : List[str] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ : List[Any] = """lower newer""" A_ : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] A_ : Any = tokenizer.tokenize(lowercase ) # , add_prefix_space=True) self.assertListEqual(lowercase , lowercase ) A_ : Optional[int] = tokens + [tokenizer.unk_token] A_ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def _a (self ): A_ : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowercase ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=lowercase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _a (self ): A_ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) A_ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase ) A_ : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase ) A_ : str = tokenizer.encode( """sequence builders""" , add_special_tokens=lowercase , add_prefix_space=lowercase ) A_ : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase , add_prefix_space=lowercase ) A_ : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase ) A_ : Any = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _a (self ): A_ : int = self.get_tokenizer() A_ : Union[str, Any] = """Encode this sequence.""" A_ : Optional[int] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments A_ : Tuple = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase ) A_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase , lowercase ) A_ : Any = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase ) A_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase , lowercase ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase , lowercase ) # Testing spaces after special tokens A_ : int = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} ) # mask token has a left space A_ : str = tokenizer.convert_tokens_to_ids(lowercase ) A_ : str = """Encode <mask> sequence""" A_ : Any = """Encode <mask>sequence""" A_ : Union[str, Any] = tokenizer.encode(lowercase ) A_ : Tuple = encoded.index(lowercase ) A_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase , lowercase ) A_ : int = tokenizer.encode(lowercase ) A_ : Union[str, Any] = encoded.index(lowercase ) A_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase , lowercase ) def _a (self ): pass def _a (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : List[str] = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : List[Any] = """A, <mask> AllenNLP sentence.""" A_ : Dict = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase ) A_ : int = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) A_ : List[Any] = 
tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) A_ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _a (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A_ : Dict = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A_ : List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowercase ) self.assertEqual(post_processor_state["""add_prefix_space"""] , lowercase ) self.assertEqual(post_processor_state["""trim_offsets"""] , lowercase ) def _a (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A_ : Tuple = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` A_ : Optional[int] = F'{text_of_1_token} {text_of_1_token}' A_ : Tuple = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , ) A_ : int = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : List[str] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , ) A_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : Dict = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , ) A_ : List[Any] = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : Optional[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , ) A_ : str = F' {text}' # 
tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A_ : int = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , ) A_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , ) A_ : Any = self.rust_tokenizer_class.from_pretrained( lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase ) A_ : Dict = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
667
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available lowerCamelCase :Union[str, Any] = { '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :int = [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Tuple = ['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
1
'''simple docstring''' import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = CTRLTokenizer __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : Dict = False def _a (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : int = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] A_ : Optional[int] = dict(zip(lowercase , range(len(lowercase ) ) ) ) A_ : Optional[int] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] A_ : Any = {"""unk_token""": """<unk>"""} A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowercase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowercase ) ) def _a (self , **lowercase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def _a (self , lowercase ): A_ : Tuple = """adapt react readapt apt""" A_ : List[Any] = """adapt react readapt apt""" return input_text, output_text def _a (self ): A_ : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ : str = """adapt react readapt apt""" A_ : List[Any] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() A_ : str = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : Optional[int] = tokens + [tokenizer.unk_token] A_ : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
667
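The CTRL test builds a toy BPE vocabulary in which "adapt" survives intact through the merges `a p`, `ap t</w>`, `a d`, `ad apt</w>`. A minimal greedy merge loop showing how those ranks reassemble the word; this is my own helper, not the library tokenizer, and it omits the `@@` continuation markers the test asserts on:

merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
ranks = {tuple(m.split()): i for i, m in enumerate(merges)}

def bpe(word):
    """Apply BPE merges greedily by rank; `</w>` marks the word boundary."""
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no applicable merge left
        i = pairs.index(best)
        symbols[i : i + 2] = ["".join(best)]
    return symbols

assert bpe("adapt") == ["adapt</w>"]
assert bpe("react") == ["re", "a", "c", "t</w>"]  # cf. "re@@ a@@ c@@ t" in the test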
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer'] __SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor' __SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__(self , lowercase=None , lowercase=None , **lowercase ): A_ : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase , ) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase , lowercase ) def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase , lowercase ): A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) A_ : Dict = features["""words"""] A_ : Optional[int] = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) # add pixel values A_ : List[Any] = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] ) A_ : Optional[int] = images return encoded_inputs def _a (self , lowercase , lowercase ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image A_ : str = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase ) != 
len(lowercase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" F' {len(lowercase )} and {len(lowercase )}' ) return images_with_overflow def _a (self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def _a (self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property def _a (self ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _a (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , ) return self.image_processor_class @property def _a (self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , ) return self.image_processor
667
1
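`get_overflowing_images` in the processor above duplicates each image once per overflowing chunk via `overflow_to_sample_mapping`. The indexing is just this (toy values):

images = ["img0", "img1"]
# two chunks came from sample 0 (it overflowed), one from sample 1
overflow_to_sample_mapping = [0, 0, 1]

images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["img0", "img0", "img1"]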
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCamelCase :Any = '''0.12''' # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def _a (cls ): A_ : Tuple = TOKEN HfFolder.save_token(lowercase ) @classmethod def _a (cls ): try: delete_repo(token=cls._token , repo_id="""test-model-flax""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" ) except HTTPError: pass def _a (self ): A_ : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ : Union[str, Any] = FlaxBertModel(lowercase ) model.push_to_hub("""test-model-flax""" , use_auth_token=self._token ) A_ : Dict = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' ) A_ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) A_ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ : Any = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' ) # Reset repo delete_repo(token=self._token , repo_id="""test-model-flax""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase , repo_id="""test-model-flax""" , push_to_hub=lowercase , use_auth_token=self._token ) A_ : str = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' ) A_ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) A_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ : List[Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' ) def _a (self ): A_ : Tuple = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ : Optional[int] = FlaxBertModel(lowercase ) model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token ) A_ : Optional[Any] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) A_ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) A_ : str = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( lowercase , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=lowercase , use_auth_token=self._token ) A_ : List[Any] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) A_ : List[str] = flatten_dict(unfreeze(model.params ) ) A_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : 
Dict = True A_ : Tuple = flatten_dict(modela.params ) A_ : List[Any] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: A_ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) A_ : Tuple = FlaxBertModel(lowercase ) A_ : str = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowercase , lowercase ) ) with self.assertRaises(lowercase ): A_ : Tuple = FlaxBertModel.from_pretrained(lowercase ) A_ : str = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase ) self.assertTrue(check_models_equal(lowercase , lowercase ) ) def _a (self ): A_ : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) A_ : int = FlaxBertModel(lowercase ) A_ : Optional[Any] = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowercase , lowercase ) , max_shard_size="""10KB""" ) with self.assertRaises(lowercase ): A_ : Tuple = FlaxBertModel.from_pretrained(lowercase ) A_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase ) self.assertTrue(check_models_equal(lowercase , lowercase ) ) def _a (self ): A_ : int = """bert""" A_ : str = """hf-internal-testing/tiny-random-bert-subfolder""" with self.assertRaises(lowercase ): A_ : Union[str, Any] = FlaxBertModel.from_pretrained(lowercase ) A_ : Union[str, Any] = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase ) self.assertIsNotNone(lowercase ) def _a (self ): A_ : List[Any] = """bert""" A_ : str = """hf-internal-testing/tiny-random-bert-sharded-subfolder""" with self.assertRaises(lowercase ): A_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase ) A_ : str = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase ) self.assertIsNotNone(lowercase )
667
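Note that after obfuscation the equality helper in this row compares `flat_params_a` with itself (both models' parameter trees collapsed onto one name), so it can never report a difference. A corrected sketch of the intended comparison over flattened parameter dicts, with nested plain dicts standing in for Flax pytrees:

import numpy as np

def flatten(tree, prefix=()):
    """Flatten a nested dict of arrays into {path_tuple: array}."""
    flat = {}
    for key, value in tree.items():
        if isinstance(value, dict):
            flat.update(flatten(value, prefix + (key,)))
        else:
            flat[prefix + (key,)] = value
    return flat

def models_equal(params_a, params_b, atol=1e-4):
    flat_a, flat_b = flatten(params_a), flatten(params_b)
    if flat_a.keys() != flat_b.keys():
        return False
    return all(np.sum(np.abs(flat_a[k] - flat_b[k])) <= atol for k in flat_a)

p1 = {"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}
p2 = {"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2) + 1e-6}}
assert models_equal(p1, p2)  # within tolerance
assert not models_equal(p1, {"dense": {"kernel": np.zeros((2, 2)), "bias": np.zeros(2)}})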
'''simple docstring''' from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCamelCase :Optional[int] = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowerCAmelCase ( __UpperCAmelCase ): def __init__(self , **lowercase ): super().__init__(**lowercase ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) self.check_model_type(lowercase ) def _a (self , **lowercase ): A_ : str = {} A_ : Dict = {} A_ : str = {} # preprocess args if "points_per_batch" in kwargs: A_ : Dict = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: A_ : int = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: A_ : str = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: A_ : int = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: A_ : Any = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: A_ : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: A_ : Union[str, Any] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: A_ : List[str] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: A_ : List[Any] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ): return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase ) def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ): A_ : Tuple = load_image(lowercase ) A_ : int = self.image_processor.size["""longest_edge"""] A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": A_ : Optional[Any] = self.get_inference_context() with inference_context(): A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device ) A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) A_ : Tuple = image_embeddings A_ : Dict = grid_points.shape[1] A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0 , lowercase , lowercase ): A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :] A_ : List[Any] = input_labels[:, i : i + points_per_batch] A_ : Optional[Any] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ): A_ : Any = model_inputs.pop("""input_boxes""" ) A_ : str = model_inputs.pop("""is_last""" ) A_ : int = model_inputs.pop("""original_sizes""" ).tolist() A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist() A_ : List[str] = self.model(**lowercase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks A_ : Optional[int] = model_outputs["""pred_masks"""] A_ : Tuple = self.image_processor.post_process_masks( lowercase , lowercase , lowercase , lowercase , binarize=lowercase ) A_ : Union[str, Any] = model_outputs["""iou_scores"""] A_, A_, A_ : Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ): A_ : Tuple = [] A_ : Optional[Any] = [] A_ : str = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) A_ : Any = torch.cat(lowercase ) A_ : List[Any] = torch.cat(lowercase ) A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation( lowercase , lowercase , lowercase , lowercase ) A_ : int = defaultdict(lowercase ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowercase ) A_ : Optional[int] = {} if output_rle_mask: A_ : List[str] = rle_mask if output_bboxes_mask: A_ : Optional[int] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
667
1
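The preprocess step above yields the prompt grid in slices of `points_per_batch`, flagging the final slice. The chunking itself is a plain strided slice; a toy rendition with my own names, using a slightly more defensive `is_last` test than the row's exact-offset comparison so it also covers grids that are not a multiple of the batch size:

points = list(range(10))          # stand-in for the (1, n_points, 1, 2) grid
points_per_batch = 4

batches = []
for i in range(0, len(points), points_per_batch):
    chunk = points[i : i + points_per_batch]
    is_last = i + points_per_batch >= len(points)
    batches.append((chunk, is_last))

assert batches[-1] == ([8, 9], True)
assert all(not last for _, last in batches[:-1])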
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase :List[str] = logging.get_logger(__name__) lowerCamelCase :str = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : int = 'data2vec-vision' def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ): super().__init__(**lowercase ) A_ : Optional[int] = hidden_size A_ : List[str] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : List[Any] = intermediate_size A_ : Dict = hidden_act A_ : Optional[int] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : List[str] = initializer_range A_ : List[str] = layer_norm_eps A_ : Dict = image_size A_ : List[str] = patch_size A_ : Any = num_channels A_ : Dict = use_mask_token A_ : Dict = use_absolute_position_embeddings A_ : List[str] = use_relative_position_bias A_ : Tuple = use_shared_relative_position_bias A_ : List[Any] = layer_scale_init_value A_ : Union[str, Any] = drop_path_rate A_ : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) A_ : Any = out_indices A_ : int = pool_scales # auxiliary head attributes (semantic segmentation) A_ : List[Any] = use_auxiliary_head A_ : List[Any] = auxiliary_loss_weight A_ : Optional[int] = auxiliary_channels A_ : Optional[Any] = auxiliary_num_convs A_ : Union[str, Any] = auxiliary_concat_input A_ : str = semantic_loss_ignore_index class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : str = version.parse('1.11' ) @property def _a (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a (self ): return 1E-4
667
'''simple docstring''' from collections.abc import Callable import numpy as np def euler_modified ( ode_func , ya , xa , step_size , x_end ): '''simple docstring''' n = int(np.ceil((x_end - xa) / step_size ) ) y = np.zeros((n + 1,) ) y[0] = ya x = xa for k in range(n ): y_pred = y[k] + step_size * ode_func(x , y[k] ) y[k + 1] = y[k] + ( (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
667
1
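Solving y' = y with the predictor-corrector above should approach e at x = 1. A self-contained check, with the method restated locally so the example runs on its own:

import numpy as np

def euler_modified(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0] = y0
    x = x0
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])                # Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_pred)     # trapezoidal corrector
        )
        x += step_size
    return y

y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
assert abs(y[-1] - np.e) < 1e-3  # second-order method at h = 0.01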
'''simple docstring''' import itertools import math def is_prime ( number ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def prime_generator ( ): '''simple docstring''' num = 2 while True: if is_prime(num ): yield num num += 1 def solution ( nth = 1_00_01 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , nth ) ) if __name__ == "__main__": print(F"{solution() = }")
667
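The trial division above only tests 6k - 1 and 6k + 1 candidates because every prime above 3 has that form. A quick standalone usage check of the generator-plus-islice pipeline, with the helpers restated under my own names:

import itertools
import math

def is_prime(n):
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):  # candidates 6k - 1 and 6k + 1
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

primes = (n for n in itertools.count(2) if is_prime(n))
assert list(itertools.islice(primes, 6)) == [2, 3, 5, 7, 11, 13]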
'''simple docstring''' import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ ) if matches: A_ : Optional[Any] = float(matches[1] ) A_ : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". A_ : Optional[Any] = 10_01 A_ : Union[str, Any] = """imagenet-1k-id2label.json""" A_ : List[str] = """huggingface/label-files""" A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()} A_ : int = """background""" A_ : List[str] = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def a ( ): '''simple docstring''' A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): '''simple docstring''' A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ ) # Load 🤗 model A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor A_ : Any = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" ) A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: A_ : Any = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) A_ : Union[str, Any] = """google/""" + model_name image_processor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''mobilenet_v1_1.0_224''', type=str, help='''Name of the MobileNetV1 model you\'d like to convert. 
Should be in the form \'mobilenet_v1_<depth>_<size>\'.''', ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase :str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
667
1
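The converter above shifts every ImageNet id up by one and inserts "background" at index 0, because TF MobileNetV1 checkpoints predict 1001 classes. The mapping fix-up is just this (toy slice of the real label file):

idalabel = {0: "tench", 1: "goldfish"}        # stand-in for imagenet-1k-id2label.json
id2label = {int(k) + 1: v for k, v in idalabel.items()}
id2label[0] = "background"                    # TF checkpoints reserve class 0

assert id2label == {0: "background", 1: "tench", 2: "goldfish"}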
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = ProphetNetTokenizer __SCREAMING_SNAKE_CASE : Any = False def _a (self ): super().setUp() A_ : Optional[int] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _a (self , lowercase ): A_ : Dict = """UNwant\u00E9d,running""" A_ : str = """unwanted, running""" return input_text, output_text def _a (self ): A_ : Optional[Any] = self.tokenizer_class(self.vocab_file ) A_ : int = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 12, 10, 11] ) def _a (self ): A_ : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def _a (self ): A_ : List[Any] = BasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a (self ): A_ : str = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def _a (self ): A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a (self ): A_ : List[Any] = BasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a (self ): A_ : Dict = BasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a (self ): A_ : str = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a (self ): A_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a (self ): A_ : Tuple = BasicTokenizer(do_lower_case=lowercase , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def _a (self ): A_ : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] A_ : Any = {} for i, token in enumerate(lowercase ): A_ : List[Any] = i A_ : List[Any] = WordpieceTokenizer(vocab=lowercase , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) @require_torch def _a (self ): A_ : int = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) A_ : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] A_ : Dict = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] A_ : Optional[int] = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" ) self.assertIsInstance(lowercase , lowercase ) A_ : Any = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def _a (self ): self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def _a (self ): self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def _a (self ): self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) @slow def _a (self ): A_ : Any = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) A_ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase ) A_ : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase ) A_ : int = tokenizer.build_inputs_with_special_tokens(lowercase ) A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
667
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowerCamelCase :List[str] = logging.get_logger(__name__) class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer' __SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer'] __SCREAMING_SNAKE_CASE : Tuple = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__(self , lowercase , lowercase=None ): super().__init__(lowercase ) A_ : Any = speaker_embeddings @classmethod def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: A_ : Any = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) A_ : str = None else: with open(lowercase ) as speaker_embeddings_json: A_ : List[str] = json.load(lowercase ) else: A_ : str = None A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase ) A_ : Optional[int] = {} A_ : Tuple = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": A_ : Union[str, Any] = self._load_voice_preset(lowercase ) A_ : Tuple = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , ) A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' ) A_ : str = tmp_dict with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def _a (self , lowercase = None , **lowercase ): A_ : List[Any] = self.speaker_embeddings[voice_preset] A_ : Optional[Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) A_ : int = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) A_ : Tuple = np.load(lowercase ) return voice_preset_dict def _a (self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): A_ : Optional[int] = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ): A_ : Optional[int] = voice_preset + """.npz""" A_ : Any = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase ) A_ : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: A_ : Union[str, Any] = voice_preset return encoded_text
667
1
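For reference, a minimal sketch of the greedy longest-match-first wordpiece splitting that the tokenizer tests above assert on; the toy vocabulary below is an assumption chosen for illustration, not ProphetNet's real vocab.

# WordpieceTokenizer and tokenize() come from transformers.models.bert.tokenization_bert,
# as imported in the test sample above. The vocab here is hypothetical.
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wordpiece.tokenize("unwanted running"))   # ['un', '##want', '##ed', 'runn', '##ing']
print(wordpiece.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']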
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase :str = 1_6 lowerCamelCase :List[Any] = 3_2 def a ( lowerCamelCase__ , lowerCamelCase__ = 16 ): '''simple docstring''' A_ : Dict = AutoTokenizer.from_pretrained("""bert-base-cased""" ) A_ : int = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) A_ : str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Optional[Any] = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : str = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : Tuple = 16 elif accelerator.mixed_precision != "no": A_ : Tuple = 8 else: A_ : int = None return tokenizer.pad( lowerCamelCase__ , padding="""longest""" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : Any = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase :List[str] = mocked_dataloaders # noqa: F811 def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase__ ) == "1": A_ : Optional[Any] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: A_ : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: A_ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Union[str, Any] = config["""lr"""] A_ : List[str] = int(config["""num_epochs"""] ) A_ : Optional[int] = int(config["""seed"""] ) A_ : List[str] = int(config["""batch_size"""] ) set_seed(lowerCamelCase__ ) A_, A_ : Optional[int] = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ ) A_ : Tuple = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation A_ : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Dict = batch_size // MAX_GPU_BATCH_SIZE A_ : Dict = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : str = model.to(accelerator.device ) # Instantiate optimizer A_ : Optional[int] = AdamW(params=model.parameters() , lr=lowerCamelCase__ ) # Instantiate scheduler A_ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_, A_, A_, A_, A_ : Union[str, Any] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: A_ : Tuple = os.path.split(lowerCamelCase__ )[-1].split(""".""" )[0] accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: A_ : Optional[Any] = 0 for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A_ : Union[str, Any] = model(**lowerCamelCase__ ) A_ : Optional[Any] = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() A_ : Union[str, Any] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): A_ : Union[str, Any] = model(**lowerCamelCase__ ) A_ : int = outputs.logits.argmax(dim=-1 ) A_, A_ : List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCamelCase__ , references=lowerCamelCase__ , ) A_ : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCamelCase__ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { """accuracy""": eval_metric["""accuracy"""], """f1""": eval_metric["""f1"""], """train_loss""": total_loss.item() / len(lowerCamelCase__ ), """epoch""": epoch, } , step=lowerCamelCase__ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def a ( ): '''simple docstring''' A_ : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=lowerCamelCase__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) A_ : Optional[int] = parser.parse_args() A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
667
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _lowerCAmelCase ( unittest.TestCase ): def _a (self ): A_ : Union[str, Any] = tempfile.mkdtemp() A_ : List[Any] = BlipImageProcessor() A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase ) processor.save_pretrained(self.tmpdirname ) def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor def _a (self , **lowercase ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer def _a (self ): shutil.rmtree(self.tmpdirname ) def _a (self ): A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a (self ): A_ : str = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 ) A_ : str = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase ) self.assertIsInstance(processor.qformer_tokenizer , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : List[str] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = self.prepare_image_inputs() A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" ) A_ : Dict = processor(images=lowercase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a (self ): A_ : List[Any] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : Any = self.get_qformer_tokenizer() A_ : List[str] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : str = """lower newer""" A_ : List[Any] = processor(text=lowercase ) A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase ) A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , 
encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def _a (self ): A_ : int = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Optional[int] = """lower newer""" A_ : Optional[int] = self.prepare_image_inputs() A_ : Tuple = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(lowercase ): processor() def _a (self ): A_ : Dict = self.get_image_processor() A_ : str = self.get_tokenizer() A_ : Optional[int] = self.get_qformer_tokenizer() A_ : int = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : Optional[int] = processor.batch_decode(lowercase ) A_ : Dict = tokenizer.batch_decode(lowercase ) self.assertListEqual(lowercase , lowercase ) def _a (self ): A_ : Any = self.get_image_processor() A_ : Dict = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = """lower newer""" A_ : Optional[Any] = self.prepare_image_inputs() A_ : Any = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
667
1
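A condensed sketch of the experiment-tracking API the script above is built around, assuming at least one tracker backend (e.g. TensorBoard) is installed; the project name and logged values are placeholders, not the script's real metrics.

from accelerate import Accelerator

# log_with="all" picks up every tracker available in the environment.
accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("tracking_example", config={"lr": 2e-5, "num_epochs": 3})
for epoch in range(3):
    # Placeholder value; the real script logs accuracy/f1/train_loss per epoch.
    accelerator.log({"train_loss": 0.0, "epoch": epoch}, step=epoch)
# Close all open trackers once the run is finished.
accelerator.end_training()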
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' with open(lowerCamelCase__ ) as metadata_file: A_ : str = json.load(lowerCamelCase__ ) A_ : Any = LukeConfig(use_entity_aware_attention=lowerCamelCase__ , **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path A_ : str = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""module"""] # Load the entity vocab file A_ : List[str] = load_original_entity_vocab(lowerCamelCase__ ) # add an entry for [MASK2] A_ : List[Any] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 A_ : int = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks A_ : Optional[Any] = AddedToken("""<ent>""" , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) A_ : Union[str, Any] = AddedToken("""<ent2>""" , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , """tokenizer_config.json""" ) , """r""" ) as f: A_ : Union[str, Any] = json.load(lowerCamelCase__ ) A_ : Dict = """MLukeTokenizer""" with open(os.path.join(lowerCamelCase__ , """tokenizer_config.json""" ) , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[int] = MLukeTokenizer.from_pretrained(lowerCamelCase__ ) # Initialize the embeddings of the special tokens A_ : List[Any] = tokenizer.convert_tokens_to_ids(["""@"""] )[0] A_ : Any = tokenizer.convert_tokens_to_ids(["""#"""] )[0] A_ : Optional[Any] = state_dict["""embeddings.word_embeddings.weight"""] A_ : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 ) A_ : Any = word_emb[enta_init_index].unsqueeze(0 ) A_ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: A_ : Optional[Any] = state_dict[bias_name] A_ : List[str] = decoder_bias[ent_init_index].unsqueeze(0 ) A_ : Dict = decoder_bias[enta_init_index].unsqueeze(0 ) A_ : Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: A_ : Optional[int] = f'encoder.layer.{layer_index}.attention.self.' 
A_ : Union[str, Any] = state_dict[prefix + matrix_name] A_ : Dict = state_dict[prefix + matrix_name] A_ : List[str] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks A_ : Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""] A_ : str = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 ) A_ : str = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' A_ : str = state_dict["""entity_predictions.bias"""] A_ : Optional[int] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 ) A_ : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) A_ : List[str] = LukeForMaskedLM(config=lowerCamelCase__ ).eval() state_dict.pop("""entity_predictions.decoder.weight""" ) state_dict.pop("""lm_head.decoder.weight""" ) state_dict.pop("""lm_head.decoder.bias""" ) A_ : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )): A_ : Tuple = state_dict[key] else: A_ : Optional[int] = state_dict[key] A_, A_ : Optional[int] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ ) if set(lowerCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(lowerCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs A_ : Optional[Any] = MLukeTokenizer.from_pretrained(lowerCamelCase__ , task="""entity_classification""" ) A_ : Optional[int] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).""" A_ : Union[str, Any] = (0, 9) A_ : List[Any] = tokenizer(lowerCamelCase__ , entity_spans=[span] , return_tensors="""pt""" ) A_ : str = model(**lowerCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base A_ : Union[str, Any] = torch.Size((1, 33, 7_68) ) A_ : Optional[Any] = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base A_ : Optional[int] = torch.Size((1, 1, 7_68) ) A_ : Optional[int] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction A_ : List[str] = MLukeTokenizer.from_pretrained(lowerCamelCase__ ) A_ : List[Any] = """Tokyo is the capital of <mask>.""" A_ : Optional[int] = (24, 30) A_ : str = tokenizer(lowerCamelCase__ , entity_spans=[span] , 
return_tensors="""pt""" ) A_ : List[Any] = model(**lowerCamelCase__ ) A_ : List[str] = encoding["""input_ids"""][0].tolist() A_ : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) ) A_ : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowerCamelCase__ ) A_ : int = outputs.entity_logits[0][0].argmax().item() A_ : Any = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(lowerCamelCase__ ) ) model.save_pretrained(lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Optional[Any] = ["""[MASK]""", """[PAD]""", """[UNK]"""] A_ : Any = [json.loads(lowerCamelCase__ ) for line in open(lowerCamelCase__ )] A_ : Union[str, Any] = {} for entry in data: A_ : List[Any] = entry["""id"""] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: A_ : Optional[Any] = entity_id break A_ : str = f'{language}:{entity_name}' A_ : Tuple = entity_id return new_mapping if __name__ == "__main__": lowerCamelCase :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) lowerCamelCase :List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
667
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
667
1
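A short usage sketch for the MGP-STR configuration above, assuming the stock MgpstrConfig export from transformers that this sample mirrors; the overridden values are arbitrary illustrations.

from transformers import MgpstrConfig

config = MgpstrConfig(max_token_length=27, num_character_labels=38)
print(config.model_type)   # mgp-str
print(config.hidden_size)  # 768 (default)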
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
667
'''simple docstring'''
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant-method update step.
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
667
1
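A usage sketch for the secant-method helper above, assuming the intersection() function from that sample is in scope; the target function and starting points are illustrative.

import math

def g(x: float) -> float:
    # cos(x) = x has a single root near 0.739 (the Dottie number).
    return math.cos(x) - x

print(intersection(g, 0.0, 1.0))  # ~0.7390851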
'''simple docstring'''
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    '''simple docstring'''

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    '''simple docstring'''
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
667
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase :Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( __UpperCAmelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values'] def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ): super().__init__(**lowercase ) A_ : Dict = size if size is not None else {"""shortest_edge""": 224} A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" ) A_ : str = do_resize A_ : str = size A_ : List[str] = resample A_ : Any = do_center_crop A_ : Union[str, Any] = crop_size A_ : List[Any] = do_rescale A_ : List[Any] = rescale_factor A_ : Dict = do_normalize A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Union[str, Any] = do_convert_rgb def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): A_ : Any = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): A_ : List[str] = do_resize if do_resize is not None else self.do_resize A_ : int = size if size is not None else self.size A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase ) A_ : int = resample if resample is not None else self.resample A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase ) A_ : str = do_rescale if do_rescale is not None else self.do_rescale A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize A_ : Any = image_mean if image_mean is not None else self.image_mean A_ : Any = image_std if image_std is not None else self.image_std A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : List[str] = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : int = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A_ : int = [to_numpy_array(lowercase ) for image in images] if do_resize: A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase , tensor_type=lowercase )
667
1
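A usage sketch for the resize/center-crop/rescale/normalize pipeline defined above, assuming the stock CLIPImageProcessor that this class mirrors; the random image is a stand-in for real input.

import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = np.random.randint(0, 255, size=(3, 30, 400), dtype=np.uint8)  # channels-first dummy image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)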
'''simple docstring'''
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
667
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    '''simple docstring'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    '''simple docstring'''
    # Take items in descending order of key_func while the budget allows.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
667
1
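A small worked example for the greedy knapsack helpers above; the menu data is invented for illustration, and Things, build_menu and greedy from that sample are assumed to be in scope.

food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]

menu = build_menu(food, value, weight)
# Pick highest-value items first while staying under a weight budget of 100:
# Pizza (value 100, weight 60) then Burger (value 80, weight 40) -> total 180.0.
chosen, total_value = greedy(menu, 100, Things.get_value)
print(total_value)  # 180.0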
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
667
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCamelCase :int = logging.getLogger(__name__) lowerCamelCase :List[Any] = 5_0 # max width of layer names lowerCamelCase :List[Any] = 7_0 # max width of quantizer names def a ( lowerCamelCase__ ): '''simple docstring''' A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" ) group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" , action="""store_true""" , help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) , ) def a ( lowerCamelCase__ ): '''simple docstring''' if args.calibrator == "max": A_ : Union[str, Any] = """max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) A_ : int = """histogram""" elif args.calibrator == "mse": A_ : Dict = """histogram""" else: raise ValueError(f'Invalid calibrator {args.calibrator}' ) A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ ) A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ): '''simple docstring''' logger.info("""Configuring Model for Quantization""" ) logger.info(f'using quantization package {pytorch_quantization.__file__}' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ ) if args.quant_disable: set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ ) if args.quant_disable_keyword: set_quantizer_by_name(lowerCamelCase__ , 
args.quant_disable_keyword , _disabled=lowerCamelCase__ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ ) if args.recalibrate_weights: recalibrate_weights(lowerCamelCase__ ) if args.fuse_qkv: fuse_qkv(lowerCamelCase__ , lowerCamelCase__ ) if args.clip_gelu: clip_gelu(lowerCamelCase__ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ ): '''simple docstring''' logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'{name:80}: {module}' ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): for mod in [qq, qk, qv]: if not hasattr(lowerCamelCase__ , """_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return A_ : List[Any] = qq._amax.detach().item() A_ : Optional[int] = qk._amax.detach().item() A_ : Dict = qv._amax.detach().item() A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) qq._amax.fill_(lowerCamelCase__ ) qk._amax.fill_(lowerCamelCase__ ) qv._amax.fill_(lowerCamelCase__ ) logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(f'FUSE_QKV: {name:{name_width}}' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def a ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ ) A_ : Dict = mod._input_quantizer._amax.data.detach().item() logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: A_ : Tuple = mod.weight.shape[0] A_ : Dict = mod._weight_quantizer._amax.detach() A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' ) def a ( lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_weight_quantizer""" ): if not 
hasattr(mod.weight_quantizer , """_amax""" ): print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach() logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' ) A_ : str = amax def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ): '''simple docstring''' if ignore is None: A_ : int = [] elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = [ignore] A_ : Optional[Any] = 0 for name, mod in model.named_modules(): if not hasattr(lowerCamelCase__ , """weight""" ): continue A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) ) for name, mod in model.named_modules(): A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ ) A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ ) if not hasattr(lowerCamelCase__ , """weight""" ): continue if type(lowerCamelCase__ ) in ignore: continue if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]: continue A_ : Optional[int] = f'Act:{input_q.extra_repr()}' A_ : Dict = f'Wgt:{weight_q.extra_repr()}' A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}' if len(lowerCamelCase__ ) <= line_width: logger.info(lowerCamelCase__ ) else: logger.info(f'{name:{name_width}} {act_str}' ) logger.info(f'{" ":{name_width}} {wgt_str}' ) def a ( lowerCamelCase__ ): '''simple docstring''' A_ : int = 0 for name, mod in model.named_modules(): if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ): print(f'{name:80} {mod}' ) count += 1 print(f'{count} TensorQuantizers found in model' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if quantizer_mod is not None: assert hasattr(lowerCamelCase__ , lowerCamelCase__ ) setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) else: logger.warning(f'{name} has no {quantizer}' ) def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ): '''simple docstring''' A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}' for k, v in kwargs.items(): s += f' {k}={v}' if which in ["input", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) if which in ["weight", "both"]: set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ ) def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' for name, mod in model.named_modules(): if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(lowerCamelCase__ , lowerCamelCase__ ): A_ : Dict = f'Warning: 
changing {name:{name_width}}' for k, v in kwargs.items(): s += f' {k}={v}' setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) logger.info(lowerCamelCase__ )
667
1
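A condensed sketch of the calibrator setup the quant_trainer utilities above perform, assuming pytorch-quantization is installed: 8-bit max calibration for activations and per-channel (axis 0) scaling for weights, using only calls that appear in the sample itself.

import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

input_desc = QuantDescriptor(num_bits=8, calib_method="max")  # activation quantizer
weight_desc = QuantDescriptor(num_bits=8, axis=(0,))          # per-output-channel weight quantizer
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)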
'''simple docstring'''
def triangle_number_generator():
    '''simple docstring'''
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    '''simple docstring'''
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
667
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)


sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class _lowerCAmelCase ( unittest.TestCase ):
    def _a (self ):
        A_ : List[Any] = 0

    @slow
    def _a (self ):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(lowercase ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(lowercase ) , 0 )

    def _a (self ):
        A_ : str = AutoTokenizer.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def _a (self ):
        A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )

    def _a (self ):
        A_ : int = AutoConfig.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        # Check that tokenizer_type ≠ model_type
        A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
        self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def _a (self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
            A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
            self.assertIsInstance(lowercase , lowercase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
            A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
            self.assertIsInstance(lowercase , lowercase )

    @require_tokenizers
    def _a (self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
            A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
            self.assertIsInstance(lowercase , lowercase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
            A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
            self.assertIsInstance(lowercase , lowercase )

    def _a (self ):
        with pytest.raises(lowercase ):
            AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )

    @require_tokenizers
    def _a (self ):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
            self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
            if isinstance(lowercase , lowercase ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
            else:
                self.assertEqual(tokenizer.do_lower_case , lowercase )
            self.assertEqual(tokenizer.model_max_length , 512 )

    @require_tokenizers
    def _a (self ):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                lowercase ,
                """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" ,
            ):
                A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )

    def _a (self ):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        A_ : List[str] = TOKENIZER_MAPPING.values()
        A_ : Optional[Any] = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(lowercase )

    @require_tokenizers
    def _a (self ):
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )

    @require_tokenizers
    def _a (self ):
        A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
        A_ : List[Any] = """Hello, world. How are you?"""
        A_ : List[Any] = tokenizer.tokenize(lowercase )
        self.assertEqual("""[UNK]""" , tokens[0] )
        A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
        A_ : List[Any] = tokenizer.tokenize(lowercase )
        self.assertEqual("""[UNK]""" , tokens[0] )

    @require_tokenizers
    def _a (self ):
        A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
        self.assertEqual(type(lowercase ) , lowercase )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 30000 )
        self.assertEqual(tokenizer.unk_token , """[UNK]""" )
        self.assertEqual(tokenizer.padding_side , """right""" )
        self.assertEqual(tokenizer.truncation_side , """right""" )

    def _a (self ):
        A_ : Any = AutoTokenizer.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowercase )
            A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )

    def _a (self ):
        A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(lowercase , lowercase )

    def _a (self ):
        # Check we can load the tokenizer config of an online model.
        A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
        A_ : Any = config.pop("""_commit_hash""" , lowercase )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(lowercase , {"""do_lower_case""": False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        A_ : List[Any] = get_tokenizer_config(lowercase )
        self.assertDictEqual(lowercase , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        A_ : int = AutoTokenizer.from_pretrained(lowercase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowercase )
            A_ : Dict = get_tokenizer_config(lowercase )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )

    def _a (self ):
        try:
            AutoConfig.register("""custom""" , lowercase )
            AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase ):
                AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
            A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowercase )
                A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
            self.assertIsInstance(lowercase , lowercase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def _a (self ):
        try:
            AutoConfig.register("""custom""" , lowercase )
            # Can register in two steps
            AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase ):
                AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter
            # for our new tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                A_ : str = BertTokenizerFast.from_pretrained(lowercase )
                bert_tokenizer.save_pretrained(lowercase )
                A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowercase )
                A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
                self.assertIsInstance(lowercase , lowercase )
                A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
                self.assertIsInstance(lowercase , lowercase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def _a (self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase ):
            A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase ):
            A_ : Any = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
        A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowercase )
            A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            A_ : str = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowercase )
                A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )

    @require_tokenizers
    def _a (self ):
        class _lowerCAmelCase ( __UpperCAmelCase ):
            __SCREAMING_SNAKE_CASE : Dict = False

        class _lowerCAmelCase ( __UpperCAmelCase ):
            __SCREAMING_SNAKE_CASE : str = NewTokenizer
            __SCREAMING_SNAKE_CASE : Optional[Any] = False

        try:
            AutoConfig.register("""custom""" , lowercase )
            AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
            AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # If remote code is not set, the default is to use local
            A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            A_ : int = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            A_ : List[Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            A_ : Any = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertTrue(tokenizer.special_attribute_present )
            A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def _a (self ):
        A_ : Dict = AutoTokenizer.from_pretrained(
            """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            A_ : Optional[int] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )

    def _a (self ):
        with self.assertRaisesRegex(
            lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
            A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )

    def _a (self ):
        with self.assertRaisesRegex(
            lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )

    def _a (self ):
        # Make sure we have cached the tokenizer.
        A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        with RequestCounter() as counter:
            A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
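# A condensed, hedged sketch of the registration flow the tests above exercise. The
# AutoConfig.register / AutoTokenizer.register calls are the real transformers API;
# MyConfig and MyTokenizer are illustrative stand-ins, not the test_module classes.
from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-custom"


class MyTokenizer(BertTokenizer):
    pass


AutoConfig.register("my-custom", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer.from_pretrained(...) now resolves "my-custom" checkpoints to MyTokenizer.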
667
1
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Dict = FileLock(str(tmpdir / """foo.lock""" ) )
    A_ : List[str] = FileLock(str(tmpdir / """foo.lock""" ) )
    A_ : Optional[Any] = 0.01
    with locka.acquire():
        with pytest.raises(lowerCamelCase__ ):
            A_ : List[Any] = time.time()
            locka.acquire(lowerCamelCase__ )
            assert time.time() - _start > timeout


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Dict = """a""" * 10_00 + """.lock"""
    A_ : Optional[int] = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(lowerCamelCase__ )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    A_ : List[str] = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(lowerCamelCase__ ):
            locka.acquire(0 )
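# A hedged usage sketch of the FileLock behaviour the tests above assert: a second
# lock object on the same path blocks, and acquire(timeout=...) raises Timeout once
# the wait expires (the lock path below is an arbitrary illustration).
from datasets.utils.filelock import FileLock, Timeout

outer = FileLock("/tmp/demo.lock")
with outer.acquire():
    try:
        with FileLock("/tmp/demo.lock").acquire(timeout=0.01):
            pass  # never reached while the outer lock is held
    except Timeout:
        print("another holder owns the lock, as expected")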
667
'''simple docstring'''
from __future__ import annotations


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    A_ : int = number_of_bytes // partitions
    A_ : Union[str, Any] = []
    for i in range(lowerCamelCase__ ):
        A_ : Dict = i * bytes_per_partition + 1
        A_ : Tuple = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
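# A quick worked trace of the split above, using assumed de-obfuscated names for the
# A_ assignments (bytes_per_partition = number_of_bytes // partitions; the last
# partition absorbs any remainder):
#   a(16, 4) -> ['1-4', '5-8', '9-12', '13-16']
#   a(22, 4) -> ['1-5', '6-10', '11-15', '16-22']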
667
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCamelCase :Dict = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :Optional[int] = ['''XLNetTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :Optional[int] = ['''XLNetTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :Any = [
        '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLNetForMultipleChoice''',
        '''XLNetForQuestionAnswering''',
        '''XLNetForQuestionAnsweringSimple''',
        '''XLNetForSequenceClassification''',
        '''XLNetForTokenClassification''',
        '''XLNetLMHeadModel''',
        '''XLNetModel''',
        '''XLNetPreTrainedModel''',
        '''load_tf_weights_in_xlnet''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase :Optional[Any] = [
        '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLNetForMultipleChoice''',
        '''TFXLNetForQuestionAnsweringSimple''',
        '''TFXLNetForSequenceClassification''',
        '''TFXLNetForTokenClassification''',
        '''TFXLNetLMHeadModel''',
        '''TFXLNetMainLayer''',
        '''TFXLNetModel''',
        '''TFXLNetPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
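# A brief, hedged note on the _LazyModule pattern above: everything listed in
# _import_structure is imported only on first attribute access, so importing the
# package stays cheap even when optional backends (torch, tf, sentencepiece) are absent.
# import transformers.models.xlnet as xlnet   # cheap: no heavy submodule imported yet
# cfg = xlnet.XLNetConfig                     # first access triggers the real import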
667
'''simple docstring'''
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Dict = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
            ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
            ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
            ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
            ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
            ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
        ]
    )
    return rename_keys


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
        A_ : Union[str, Any] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        A_ : str = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        A_ : Union[str, Any] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : str = dct.pop(lowerCamelCase__ )
    A_ : Optional[int] = val


def a ( lowerCamelCase__ ):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
    return im


@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
    A_ : int = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        A_ : List[str] = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        A_ : Union[str, Any] = 10_24
        A_ : List[Any] = 40_96
        A_ : Dict = 24
        A_ : List[str] = 16
        A_ : Union[str, Any] = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        A_ : Optional[Any] = False
        A_ : Union[str, Any] = """relu"""
        A_ : List[str] = 10_24
        A_ : Tuple = True
        A_ : Tuple = False
        A_ : List[str] = False

    # load HuggingFace model
    A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
    A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
    A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
    model.eval()

    # load state_dict of original model, rename some keys
    A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
    A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
    for src, dest in rename_keys:
        rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            A_ : str = val
        else:
            A_ : List[str] = val

    # load state dict
    model.load_state_dict(lowerCamelCase__ )

    # Check outputs on an image
    A_ : str = ViTImageProcessor(size=encoder_config.image_size )
    A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
    A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
    A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values

    # verify logits
    A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
    A_ : Dict = outputs.logits
    A_ : str = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        A_ : Optional[int] = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        A_ : Any = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
    elif "trocr-base-printed" in checkpoint_url:
        A_ : List[Any] = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
    elif "trocr-large-printed" in checkpoint_url:
        A_ : Optional[Any] = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"

    Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(lowerCamelCase__ )
    print(f'Saving processor to {pytorch_dump_folder_path}' )
    processor.save_pretrained(lowerCamelCase__ )


if __name__ == "__main__":
    lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )

    lowerCamelCase :Optional[int] = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
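# A hedged example invocation of the conversion script above (the script filename is
# an assumption; the checkpoint URL is the argparse default shown above):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten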
667
1
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class _lowerCAmelCase ( ctypes.Structure ):
        # _fields is a specific attr expected by ctypes
        __SCREAMING_SNAKE_CASE : List[str] = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]


def a ( ):
    '''simple docstring'''
    if os.name == "nt":
        A_ : Any = CursorInfo()
        A_ : Tuple = ctypes.windll.kernelaa.GetStdHandle(-11 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) )
        A_ : int = False
        ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()


def a ( ):
    '''simple docstring'''
    if os.name == "nt":
        A_ : List[Any] = CursorInfo()
        A_ : str = ctypes.windll.kernelaa.GetStdHandle(-11 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) )
        A_ : Tuple = True
        ctypes.windll.kernelaa.SetConsoleCursorInfo(lowerCamelCase__ , ctypes.byref(lowerCamelCase__ ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()


@contextmanager
def a ( ):
    '''simple docstring'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
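# A short usage sketch of the context manager above (here named `a` by this dataset's
# renaming; the reads inside it suggest hide_cursor/show_cursor as the original helpers):
# with a():
#     run_long_task()  # cursor hidden for the duration, restored even on exceptions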
667
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
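# A hedged note on how the one-liner above reproduces itself: `quine % quine` formats
# the template string with its own text, `%%` collapses to a literal `%`, and `%r`
# splices in the string's repr (quotes included), so the printed output is again a
# program that prints itself.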
667
1
'''simple docstring'''
from typing import Any


class _lowerCAmelCase :
    def __init__(self , lowercase ):
        A_ : int = data
        A_ : int = None


class _lowerCAmelCase :
    def __init__(self ):
        A_ : int = None

    def _a (self ):
        A_ : Optional[Any] = self.head
        while temp is not None:
            print(temp.data , end=""" """ )
            A_ : Dict = temp.next
        print()

    def _a (self , lowercase ):
        A_ : List[str] = Node(lowercase )
        A_ : Tuple = self.head
        A_ : Any = new_node

    def _a (self , lowercase , lowercase ):
        if node_data_a == node_data_a:
            return
        else:
            A_ : Optional[Any] = self.head
            while node_a is not None and node_a.data != node_data_a:
                A_ : List[Any] = node_a.next
            A_ : Tuple = self.head
            while node_a is not None and node_a.data != node_data_a:
                A_ : Tuple = node_a.next
            if node_a is None or node_a is None:
                return
            A_, A_ : Any = node_a.data, node_a.data


if __name__ == "__main__":
    lowerCamelCase :str = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print('''After swapping''')
    ll.print_list()
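# Worth noting about swap_nodes above: it exchanges the `data` payloads of the two
# found nodes instead of re-linking them, so no pointer surgery is needed. A hedged
# trace of the __main__ demo, assuming push prepends at the head:
#   pushes 5..1 -> prints "1 2 3 4 5"; swap_nodes(1, 4) -> prints "4 2 3 1 5"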
667
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs


lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)


def a ( ):
    '''simple docstring'''
    A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
    # assert negative_img array for at least one True
    assert negative_img.any()


def a ( ):
    '''simple docstring'''
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""" )


def a ( ):
    '''simple docstring'''
    A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def a ( ):
    '''simple docstring'''
    A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    A_ : List[Any] = canny.canny(lowerCamelCase__ )
    # assert canny array for at least one True
    assert canny_array.any()


def a ( ):
    '''simple docstring'''
    assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()


def a ( ):
    '''simple docstring'''
    A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
    assert res.any()


def a ( ):
    '''simple docstring'''
    assert med.median_filter(lowerCamelCase__ , 3 ).any()


def a ( ):
    '''simple docstring'''
    A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
    assert grad.any() and theta.any()


def a ( ):
    '''simple docstring'''
    A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
    assert sepia.all()


def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
    '''simple docstring'''
    A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()


def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''simple docstring'''
    A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()


def a ( ):
    '''simple docstring'''
    A_ : int = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
    # Test for get_neighbors_pixel function() return not None
    A_ : str = 0
    A_ : str = 0
    A_ : Dict = image[x_coordinate][y_coordinate]
    A_ : Optional[Any] = lbp.get_neighbors_pixel(
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    A_ : str = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    assert lbp_image.any()
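# A brief, hedged note on the LBP check above: local_binary_value is expected to
# threshold a pixel's 8 neighbours against the centre and pack the resulting bits,
# so lbp_image should hold integers in [0, 255]; `assert lbp_image.any()` only
# verifies that at least one pixel produced a non-zero pattern.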
667
1
'''simple docstring'''
from __future__ import annotations


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''simple docstring'''
    A_, A_ : Any = set(lowerCamelCase__ ), [start]
    while stack:
        A_ : Tuple = stack.pop()
        explored.add(lowerCamelCase__ )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(lowerCamelCase__ )
    return explored


lowerCamelCase :Union[str, Any] = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, '''A'''))
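# A hedged trace of the iterative DFS above on the sample graph, starting at "A"
# (neighbours are pushed reversed so they pop off the stack in listed order):
#   visit order: A, B, D, E, F, C, G
# Note the function returns the `explored` set, so that ordering is not preserved
# in the result itself.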
667
'''simple docstring'''
from importlib import import_module

from .logging import get_logger


lowerCamelCase :Dict = get_logger(__name__)


class _lowerCAmelCase :
    def __init__(self , lowercase , lowercase=None ):
        A_ : Optional[int] = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("""__""" ):
                    setattr(self , lowercase , getattr(lowercase , lowercase ) )
        A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module


class _lowerCAmelCase :
    __SCREAMING_SNAKE_CASE : Dict = []

    def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
        A_ : Union[str, Any] = obj
        A_ : Optional[int] = target
        A_ : Optional[Any] = new
        A_ : Optional[Any] = target.split(""".""" )[0]
        A_ : Tuple = {}
        A_ : Optional[int] = attrs or []

    def __enter__(self ):
        *A_, A_ : Optional[Any] = self.target.split(""".""" )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(lowercase ) ):
            try:
                A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                A_ : int = getattr(self.obj , lowercase )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows patching renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    A_ : str = obj_attr
                    # patch at top level
                    setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
                    A_ : Optional[Any] = getattr(self.obj , lowercase )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
                        A_ : Dict = getattr(lowercase , lowercase )
                    # finally set the target attribute
                    setattr(lowercase , lowercase , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:
            # if it's an attribute of a submodule like "os.path.join"
            try:
                A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows patching renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , lowercase ) is attr_value:
                    A_ : Dict = getattr(self.obj , lowercase )
                    setattr(self.obj , lowercase , self.new )
        elif target_attr in globals()["__builtins__"]:
            # if it's a builtin like "open"
            A_ : int = globals()["""__builtins__"""][target_attr]
            setattr(self.obj , lowercase , self.new )
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )

    def __exit__(self , *lowercase ):
        for attr in list(self.original ):
            setattr(self.obj , lowercase , self.original.pop(lowercase ) )

    def _a (self ):
        self.__enter__()
        self._active_patches.append(self )

    def _a (self ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
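# A hedged usage sketch of the patcher above (in the datasets library the class is
# `patch_submodule` from datasets.utils.patching; `some_module` and `mock_join` are
# illustrative stand-ins):
# with patch_submodule(some_module, "os.path.join", mock_join):
#     some_module.os.path.join("a", "b")   # resolves to mock_join while patched
# The start()/stop() methods offer the same patch without a with-block.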
667
1