code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowercase = logging.get_logger(__name__) def UpperCAmelCase ( A : Optional[Any] , A : Optional[int] ): '''simple docstring''' try: with open(A , 'rb' ) as flax_state_f: _UpperCAmelCase = from_bytes(A , flax_state_f.read() ) except UnpicklingError as e: try: with open(A ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(A , A ) def UpperCAmelCase ( A : str , A : Union[str, Any] ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights _UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda A : x.dtype == jnp.bfloataa , A ) ).values() if any(A ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) _UpperCAmelCase = jax.tree_util.tree_map( lambda A : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , A ) _UpperCAmelCase = '' _UpperCAmelCase = flatten_dict(A , sep='.' 
) _UpperCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys _UpperCAmelCase = [] _UpperCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _UpperCAmelCase = flax_key_tuple.split('.' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] _UpperCAmelCase = jnp.transpose(A , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] _UpperCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": _UpperCAmelCase = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(A ): _UpperCAmelCase = ( flax_key_tuple_string.replace('_0' , '.0' ) .replace('_1' , '.1' ) .replace('_2' , '.2' ) .replace('_3' , '.3' ) .replace('_4' , '.4' ) .replace('_5' , '.5' ) .replace('_6' , '.6' ) .replace('_7' , '.7' ) .replace('_8' , '.8' ) .replace('_9' , '.9' ) ) _UpperCAmelCase = '.'.join(A ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict _UpperCAmelCase = np.asarray(A ) if not isinstance(A , np.ndarray ) else flax_tensor _UpperCAmelCase = torch.from_numpy(A ) # remove from missing keys missing_keys.remove(A ) else: # weight is not expected by PyTorch model unexpected_keys.append(A ) pt_model.load_state_dict(A ) # re-transform missing_keys to list _UpperCAmelCase = list(A ) if len(A ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(A ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ' use it for predictions and inference.' ) return pt_model
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , 
batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french 
model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
1
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( A : str , A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) _UpperCAmelCase = DatasetInfosDict.from_directory(A ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( A : str , A : DatasetInfo ): '''simple docstring''' _UpperCAmelCase = str(A ) dataset_info.write_to_directory(A ) _UpperCAmelCase = DatasetInfo.from_directory(A ) assert dataset_info == reloaded assert os.path.exists(os.path.join(A , 'dataset_info.json' ) ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , 
download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _UpperCAmelCase = dataset_info._to_yaml_dict() assert sorted(A ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _UpperCAmelCase = yaml.safe_dump(A ) _UpperCAmelCase = yaml.safe_load(A ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = DatasetInfo() _UpperCAmelCase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( A : Tuple , A : DatasetInfosDict ): '''simple docstring''' _UpperCAmelCase = str(A ) dataset_infos_dict.write_to_directory(A ) _UpperCAmelCase = DatasetInfosDict.from_directory(A ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(A , 'README.md' ) )
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = 
self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) 
_UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
24
1
"""simple docstring""" import os import jsonlines import numpy as np from tqdm import tqdm lowercase = 20_48 lowercase = 40_96 lowercase = 42 lowercase = os.environ.pop('''PROCESS_TRAIN''', '''false''') lowercase = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def UpperCAmelCase ( A : str ): '''simple docstring''' def choose_first(A : int , A : Tuple=False ): assert isinstance(A , A ) if len(A ) == 1: _UpperCAmelCase = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: _UpperCAmelCase = {k: [a[k]] for k in a} if len(a['start_token'] ) > 0: break return a _UpperCAmelCase = {'id': example['id']} _UpperCAmelCase = example['annotations'] _UpperCAmelCase = annotation['yes_no_answer'] if 0 in yes_no_answer or 1 in yes_no_answer: _UpperCAmelCase = ['yes'] if 1 in yes_no_answer else ['no'] _UpperCAmelCase = _UpperCAmelCase = [] _UpperCAmelCase = _UpperCAmelCase = [] _UpperCAmelCase = ['<cls>'] else: _UpperCAmelCase = ['short'] _UpperCAmelCase = choose_first(annotation['short_answers'] ) if len(out['start_token'] ) == 0: # answer will be long if short is not available _UpperCAmelCase = ['long'] _UpperCAmelCase = choose_first(annotation['long_answer'] , is_long_answer=A ) _UpperCAmelCase = [] answer.update(A ) # disregard some samples if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]: _UpperCAmelCase = True else: _UpperCAmelCase = False _UpperCAmelCase = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text'] if not all(isinstance(answer[k] , A ) for k in cols ): raise ValueError('Issue in ID' , example['id'] ) return answer def UpperCAmelCase ( A : Dict , A : str=False ): '''simple docstring''' _UpperCAmelCase = _get_single_answer(A ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element _UpperCAmelCase = 
example['document']['tokens'] _UpperCAmelCase = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) return { "context": " ".join(A ), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples _UpperCAmelCase = ['start_token', 'end_token'] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10 _UpperCAmelCase = example['document']['tokens'] _UpperCAmelCase = answer['start_token'] _UpperCAmelCase = answer['end_token'] _UpperCAmelCase = [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 _UpperCAmelCase = ' '.join(context[start_token:end_token] ) # checking above code if assertion: _UpperCAmelCase = doc['is_html'][answer['start_token'] : answer['end_token']] _UpperCAmelCase = doc['token'][answer['start_token'] : answer['end_token']] _UpperCAmelCase = ' '.join([old[i] for i in range(len(A ) ) if not is_html[i]] ) if new != old: print('ID:' , example['id'] ) print('New:' , A , end='\n' ) print('Old:' , A , end='\n\n' ) return { "context": " ".join(A ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def UpperCAmelCase ( A : int , A : Any , A : str=2048 , A : List[Any]=4096 , A : Any=True ): '''simple docstring''' _UpperCAmelCase = get_context_and_ans(A , assertion=A ) _UpperCAmelCase = out['answer'] # later, removing these samples if answer["start_token"] == 
-1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } _UpperCAmelCase = tokenizer(example['question']['text'] , out['context'] ).input_ids _UpperCAmelCase = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = input_ids[:q_len] _UpperCAmelCase = range(A , len(A ) , max_length - doc_stride ) for i in doc_start_indices: _UpperCAmelCase = i + max_length - q_len _UpperCAmelCase = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['category'][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(A ), "end_token": [-100] * len(A ), "category": category, }, } _UpperCAmelCase = out['context'].split() _UpperCAmelCase = splitted_context[answer['end_token']] _UpperCAmelCase = len( tokenizer( ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=A , ).input_ids ) _UpperCAmelCase = len( tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=A ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token _UpperCAmelCase = len(tokenizer(A , add_special_tokens=A ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 _UpperCAmelCase = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive _UpperCAmelCase = answer['start_token'] _UpperCAmelCase = answer['end_token'] if assertion: _UpperCAmelCase = tokenizer.decode(A ) if answer["span"] != new: print('ISSUE IN TOKENIZATION' ) print('OLD:' , answer['span'] ) print('NEW:' , A , end='\n\n' ) if len(A ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": 
[answer["end_token"]], "category": answer["category"], }, } _UpperCAmelCase = input_ids[:q_len] _UpperCAmelCase = range(A , len(A ) , max_length - doc_stride ) _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] # null, yes, no, long, short for i in doc_start_indices: _UpperCAmelCase = i + max_length - q_len _UpperCAmelCase = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: _UpperCAmelCase = start_token - i + q_len _UpperCAmelCase = end_token - i + q_len answers_category.append(answer['category'][0] ) # ["short"] -> "short" else: _UpperCAmelCase = -100 _UpperCAmelCase = -100 answers_category.append('null' ) _UpperCAmelCase = inputs[-1][start_token : end_token + 1] answers_start_token.append(A ) answers_end_token.append(A ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('ISSUE in strided for ID:' , example['id'] ) print('New:' , tokenizer.decode(A ) ) print('Old:' , tokenizer.decode(A ) , end='\n\n' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def UpperCAmelCase ( A : List[Any] , A : int , A : Any=2048 , A : Optional[int]=4096 , A : Optional[Any]=False ): '''simple docstring''' _UpperCAmelCase = get_strided_contexts_and_ans( A , A , doc_stride=A , max_length=A , assertion=A , ) return example def UpperCAmelCase ( A : int , A : Dict ): '''simple docstring''' with jsonlines.open(A , 'a' ) as writer: for example in tqdm(A , total=len(A ) , desc='Saving samples ... 
' ): _UpperCAmelCase = example['labels'] for ids, start, end, cat in zip( example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { 'input_ids': ids, 'start_token': start, 'end_token': end, 'category': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer lowercase = load_dataset('''natural_questions''') lowercase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') lowercase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] lowercase = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } lowercase = data.map(prepare_inputs, fn_kwargs=fn_kwargs) lowercase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) lowercase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
24
1
"""simple docstring""" import string from math import logaa def UpperCAmelCase ( A : str , A : str ): '''simple docstring''' _UpperCAmelCase = document.translate( str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' ) _UpperCAmelCase = document_without_punctuation.split(' ' ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def UpperCAmelCase ( A : str , A : str ): '''simple docstring''' _UpperCAmelCase = corpus.lower().translate( str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with '' _UpperCAmelCase = corpus_without_punctuation.split('\n' ) _UpperCAmelCase = term.lower() return (len([doc for doc in docs if term in doc] ), len(A )) def UpperCAmelCase ( A : int , A : int , A : Union[str, Any]=False ): '''simple docstring''' if smoothing: if n == 0: raise ValueError('log10(0) is undefined.' ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError('df must be > 0' ) elif n == 0: raise ValueError('log10(0) is undefined.' ) return round(logaa(n / df ) , 3 ) def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return round(tf * idf , 3 )
24
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
24
1
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( A : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = str(A ) _UpperCAmelCase = [n] for i in range(1 , len(A ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def UpperCAmelCase ( A : int ): '''simple docstring''' if len(str(A ) ) > 3: if not is_prime(int(str(A )[-3:] ) ) or not is_prime(int(str(A )[:3] ) ): return False return True def UpperCAmelCase ( A : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(A ) != count: if validate(A ): _UpperCAmelCase = list_truncated_nums(A ) if all(is_prime(A ) for i in list_nums ): list_truncated_primes.append(A ) num += 2 return list_truncated_primes def UpperCAmelCase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F'''{sum(compute_truncated_primes(11)) = }''')
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, 
MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
24
1
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=A , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=A , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=A , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=A , default='data/dump' , help='The dump file prefix.' ) _UpperCAmelCase = parser.parse_args() logger.info(f'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": _UpperCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) _UpperCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]` _UpperCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": _UpperCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) _UpperCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>` _UpperCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": _UpperCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) _UpperCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` _UpperCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(f'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: _UpperCAmelCase = fp.readlines() logger.info('Start 
encoding' ) logger.info(f'{len(A )} examples to process.' ) _UpperCAmelCase = [] _UpperCAmelCase = 0 _UpperCAmelCase = 1_0000 _UpperCAmelCase = time.time() for text in data: _UpperCAmelCase = f'{bos} {text.strip()} {sep}' _UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A ) rslt.append(A ) iter += 1 if iter % interval == 0: _UpperCAmelCase = time.time() logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) _UpperCAmelCase = time.time() logger.info('Finished binarization' ) logger.info(f'{len(A )} examples processed.' ) _UpperCAmelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle' _UpperCAmelCase = tokenizer.vocab_size if vocab_size < (1 << 16): _UpperCAmelCase = [np.uintaa(A ) for d in rslt] else: _UpperCAmelCase = [np.intaa(A ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'Dump to {dp_file}' ) with open(A , 'wb' ) as handle: pickle.dump(rslt_ , A , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
24
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
24
1
"""simple docstring""" from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = SMALL_MODEL_IDENTIFIER _UpperCAmelCase = 'pt' _UpperCAmelCase = 'tf' def lowerCamelCase_ ( self , snake_case ) -> Union[str, Any]: _UpperCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]: _UpperCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case ) model_tf.save_pretrained(snake_case ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = 'mock_framework' # Framework provided - return whatever the user provides _UpperCAmelCase = FeaturesManager.determine_framework(self.test_model , snake_case ) self.assertEqual(snake_case , snake_case ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case ) _UpperCAmelCase = FeaturesManager.determine_framework(snake_case , snake_case ) self.assertEqual(snake_case , snake_case ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case ) _UpperCAmelCase = FeaturesManager.determine_framework(snake_case , snake_case ) self.assertEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> List[Any]: # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case ) _UpperCAmelCase = FeaturesManager.determine_framework(snake_case ) self.assertEqual(snake_case , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case ) 
_UpperCAmelCase = FeaturesManager.determine_framework(snake_case ) self.assertEqual(snake_case , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case ): _UpperCAmelCase = FeaturesManager.determine_framework(snake_case ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = MagicMock(return_value=snake_case ) with patch('transformers.onnx.features.is_tf_available' , snake_case ): _UpperCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _UpperCAmelCase = MagicMock(return_value=snake_case ) with patch('transformers.onnx.features.is_torch_available' , snake_case ): _UpperCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case , self.framework_tf ) # Both in environment -> use PyTorch _UpperCAmelCase = MagicMock(return_value=snake_case ) _UpperCAmelCase = MagicMock(return_value=snake_case ) with patch('transformers.onnx.features.is_tf_available' , snake_case ), patch( 'transformers.onnx.features.is_torch_available' , snake_case ): _UpperCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case , self.framework_pt ) # Both not in environment -> raise error _UpperCAmelCase = MagicMock(return_value=snake_case ) _UpperCAmelCase = MagicMock(return_value=snake_case ) with patch('transformers.onnx.features.is_tf_available' , snake_case ), patch( 'transformers.onnx.features.is_torch_available' , snake_case ): with self.assertRaises(snake_case ): _UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
24
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , 
framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
24
1
"""simple docstring""" def UpperCAmelCase ( A : Tuple ): '''simple docstring''' return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def UpperCAmelCase ( A : dict[int, list[int]] ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = len(A ) # No of vertices in graph _UpperCAmelCase = [0] * n _UpperCAmelCase = [False] * n def dfs(A : Dict , A : List[Any] , A : Any , A : Any ): _UpperCAmelCase = True _UpperCAmelCase = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(A , A , A , id_ ) _UpperCAmelCase = min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge _UpperCAmelCase = min(low[at] , low[to] ) _UpperCAmelCase = [] for i in range(A ): if not visited[i]: dfs(A , -1 , A , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
24
1
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase = logging.get_logger(__name__) # General docstring lowercase = '''ResNetConfig''' # Base docstring lowercase = '''microsoft/resnet-50''' lowercase = [1, 20_48, 7, 7] # Image classification docstring lowercase = '''microsoft/resnet-50''' lowercase = '''tiger cat''' lowercase = [ '''microsoft/resnet-50''', # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ) -> Dict: super().__init__() _UpperCAmelCase = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) _UpperCAmelCase = nn.BatchNormad(snake_case ) _UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCamelCase_ ( self , snake_case ) -> Tensor: _UpperCAmelCase = self.convolution(snake_case ) _UpperCAmelCase = self.normalization(snake_case ) _UpperCAmelCase = self.activation(snake_case ) return hidden_state class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case ) -> Dict: super().__init__() _UpperCAmelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , 
activation=config.hidden_act ) _UpperCAmelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCAmelCase = config.num_channels def lowerCamelCase_ ( self , snake_case ) -> Tensor: _UpperCAmelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) _UpperCAmelCase = self.embedder(snake_case ) _UpperCAmelCase = self.pooler(snake_case ) return embedding class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case = 2 ) -> Union[str, Any]: super().__init__() _UpperCAmelCase = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) _UpperCAmelCase = nn.BatchNormad(snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Tensor: _UpperCAmelCase = self.convolution(snake_case ) _UpperCAmelCase = self.normalization(snake_case ) return hidden_state class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ) -> List[str]: super().__init__() _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) _UpperCAmelCase = ACTaFN[activation] def lowerCamelCase_ ( self , snake_case ) -> List[Any]: _UpperCAmelCase = hidden_state _UpperCAmelCase = self.layer(snake_case ) _UpperCAmelCase = self.shortcut(snake_case ) hidden_state += residual _UpperCAmelCase = self.activation(snake_case ) return hidden_state class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ) -> Optional[int]: 
super().__init__() _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = out_channels // reduction _UpperCAmelCase = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase = nn.Sequential( ResNetConvLayer(snake_case , snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) _UpperCAmelCase = ACTaFN[activation] def lowerCamelCase_ ( self , snake_case ) -> int: _UpperCAmelCase = hidden_state _UpperCAmelCase = self.layer(snake_case ) _UpperCAmelCase = self.shortcut(snake_case ) hidden_state += residual _UpperCAmelCase = self.activation(snake_case ) return hidden_state class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ) -> Optional[int]: super().__init__() _UpperCAmelCase = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _UpperCAmelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCamelCase_ ( self , snake_case ) -> Tensor: _UpperCAmelCase = input for layer in self.layers: _UpperCAmelCase = layer(snake_case ) return hidden_state class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case ) -> Optional[int]: super().__init__() _UpperCAmelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCAmelCase = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def lowerCamelCase_ ( self , snake_case , snake_case = False , snake_case = True ) -> BaseModelOutputWithNoAttention: _UpperCAmelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) _UpperCAmelCase = stage_module(snake_case ) if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = ResNetConfig _UpperCAmelCase = '''resnet''' _UpperCAmelCase = '''pixel_values''' _UpperCAmelCase = True def lowerCamelCase_ ( self , snake_case ) -> str: if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCamelCase_ ( self , snake_case , snake_case=False ) -> Optional[Any]: if isinstance(snake_case , snake_case ): _UpperCAmelCase = value lowercase = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
''' lowercase = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''', A, ) class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case ) -> Tuple: super().__init__(snake_case ) _UpperCAmelCase = config _UpperCAmelCase = ResNetEmbeddings(snake_case ) _UpperCAmelCase = ResNetEncoder(snake_case ) _UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = None ) -> BaseModelOutputWithPoolingAndNoAttention: _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.embedder(snake_case ) _UpperCAmelCase = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) _UpperCAmelCase = encoder_outputs[0] _UpperCAmelCase = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , 
hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''', A, ) class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case ) -> str: super().__init__(snake_case ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = ResNetModel(snake_case ) # classification head _UpperCAmelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase_ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ) -> ImageClassifierOutputWithNoAttention: _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) _UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase = self.classifier(snake_case ) _UpperCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCAmelCase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCAmelCase = 'single_label_classification' else: _UpperCAmelCase = 'multi_label_classification' if self.config.problem_type == "regression": _UpperCAmelCase = MSELoss() if self.num_labels == 1: _UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCAmelCase = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCAmelCase = BCEWithLogitsLoss() _UpperCAmelCase = loss_fct(snake_case , snake_case ) if not return_dict: _UpperCAmelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''', A, ) class lowercase__ ( A, A ): '''simple docstring''' def __init__( self , snake_case ) -> List[str]: super().__init__(snake_case ) super()._init_backbone(snake_case ) _UpperCAmelCase = [config.embedding_size] + config.hidden_sizes _UpperCAmelCase = ResNetEmbeddings(snake_case ) _UpperCAmelCase = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = None ) -> BackboneOutput: _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = self.embedder(snake_case ) _UpperCAmelCase = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCAmelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
24
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
1
"""Unconditional image generation with a score-based model and the SDE-VE sampler."""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowercase__(DiffusionPipeline):
    """Pipeline pairing a U-Net score network with the variance-exploding SDE scheduler.

    Components are registered via ``register_modules`` so they move with the pipeline.
    """

    # Components registered in __init__; annotated for type checkers and docs.
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample ``batch_size`` images.

        Returns an ``ImagePipelineOutput`` (or a one-tuple when ``return_dict`` is False).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Initial noise scaled to the scheduler's starting sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin-dynamics refinement at the current noise level)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (one reverse-SDE step)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean for the final image, mapped to [0, 1] HWC numpy.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
24
"""Tests for the backend-agnostic tensor helpers in ``transformers.utils``."""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class lowercase__(unittest.TestCase):
    """Each helper must agree with its NumPy equivalent on every available backend.

    Methods carry distinct ``test_*`` names so unittest discovers and runs all of them.
    """

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
24
1
"""Ideal-gas-law helpers (R = 0.0821 L·atm/(mol·K)); every result is rounded to an int."""


def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) multiplied by the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Pressure from PV = nRT, solved for P = nRT / V."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Volume from PV = nRT, solved for V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Temperature from PV = nRT, solved for T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
"""Project Euler 13: first ten digits of the sum of one hundred 50-digit numbers."""
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in ``num.txt``.

    The data file is expected to sit next to this module.
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
24
1
"""Convert original EfficientFormer checkpoints to the Hugging Face format."""
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    """Map one key of the original state dict to its Hugging Face model-key equivalent."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Keys like "network.1.12.xxx" carry a two-digit block index.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # Only the 3D (attention) blocks have true layer norms / MLP linears.
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    # Remaining 4D-block renames: convs posing as fc, batch norms posing as norms.
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of the original state dict in place and return it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    """Download the standard COCO sanity-check image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    """Convert a checkpoint, verify it against reference logits, save, optionally push to the Hub."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # The HF processor must reproduce the original preprocessing exactly.
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
24
"""Lazy import structure for the RoBERTa model family (transformers `_LazyModule` pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy backends import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
24
1
"""Numerical evaluation of the gamma function via its integral definition."""
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Return Γ(num) = ∫₀^∞ x^(num-1) e^(-x) dx.

    Raises:
        ValueError: if ``num`` is not strictly positive (integral diverges).
    """
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """Integrand x^(z-1) * e^(-x) of the gamma integral."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
24
"""Deprecation shim: ``YolosFeatureExtractor`` lives on as ``YolosImageProcessor``."""
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


lowercase = logging.get_logger(__name__)


class lowercase__(YolosImageProcessor):
    """Deprecated alias of ``YolosImageProcessor``; emits a FutureWarning on instantiation."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
24
1
"""``pearsonr`` metric: Pearson correlation coefficient with optional p-value."""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase__(datasets.Metric):
    """Thin ``datasets.Metric`` wrapper around ``scipy.stats.pearsonr``."""

    def _info(self):
        # Metric metadata consumed by the datasets library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy returns (statistic, pvalue); expose one or both depending on the flag.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
24
"""BEiT model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """Configuration for BEiT models.

    Defaults match microsoft/beit-base-patch16-224-pt22k; the trailing attributes
    configure the optional semantic-segmentation decode/auxiliary heads.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Symbolic axis names for the single pixel-values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
24
1
"""Fixed-capacity circular (ring-buffer) queue."""


class lowercase__:
    """Bounded FIFO queue backed by a fixed-size list with wrap-around indices."""

    def __init__(self, n: int) -> None:
        self.n = n  # capacity
        self.array = [None] * self.n  # backing storage
        self.front = 0  # index of the first element
        self.rear = 0  # index one past the last element (next write slot)
        self.size = 0  # number of stored items

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it; ``False`` when empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append ``data`` at the rear; returns ``self`` for chaining.

        Raises:
            Exception: when the queue is already at capacity.
        """
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element.

        Raises:
            Exception: when the queue is empty.
        """
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None  # drop the reference so the item can be collected
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
24
"""Count token occurrences in a binarized dataset, for MLM masking-probability smoothing."""
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-token-id count vector; ids never seen keep count 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
1
"""Deprecated module: import ``StableDiffusionInpaintPipeline`` from ``diffusers`` directly."""
import warnings

# Re-export kept solely for backwards compatibility with old import paths.
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa: F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
24
"""Project Euler 43: sum of 0-9 pandigital numbers with the substring-divisibility property."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the PE 43 divisibility conditions on a 10-digit pandigital tuple."""
    # d4 must be divisible by 2.
    if num[3] % 2 != 0:
        return False

    # d3 d4 d5 divisible by 3 iff the digit sum is.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d6 must be divisible by 5.
    if num[5] % 5 != 0:
        return False

    # Remaining windows d5d6d7 .. d8d9d10 must be divisible by 7, 11, 13, 17.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all pandigital numbers over the digits 0..n-1 that satisfy the property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
24
1
"""simple docstring""" from collections.abc import Callable import numpy as np def UpperCAmelCase ( A : Callable , A : float , A : float , A : float , A : float ): '''simple docstring''' _UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) ) _UpperCAmelCase = np.zeros((n + 1,) ) _UpperCAmelCase = ya _UpperCAmelCase = xa for k in range(A ): _UpperCAmelCase = y[k] + step_size * ode_func(A , y[k] ) _UpperCAmelCase = y[k] + ( (step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''cvt''' def __init__( self , snake_case=3 , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[64, 192, 384] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[4.0, 4.0, 4.0] , snake_case=[0.0, 0.0, 0.0] , snake_case=[0.0, 0.0, 0.0] , snake_case=[0.0, 0.0, 0.1] , snake_case=[True, True, True] , snake_case=[False, False, True] , snake_case=["dw_bn", "dw_bn", "dw_bn"] , snake_case=[3, 3, 3] , snake_case=[1, 1, 1] , snake_case=[2, 2, 2] , snake_case=[1, 1, 1] , snake_case=[1, 1, 1] , snake_case=0.02 , snake_case=1E-12 , **snake_case , ) -> Optional[Any]: super().__init__(**snake_case ) _UpperCAmelCase = num_channels _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = depth _UpperCAmelCase = mlp_ratio _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = drop_rate _UpperCAmelCase = drop_path_rate _UpperCAmelCase = qkv_bias _UpperCAmelCase = cls_token _UpperCAmelCase = qkv_projection_method _UpperCAmelCase = kernel_qkv _UpperCAmelCase = padding_kv _UpperCAmelCase = stride_kv _UpperCAmelCase = padding_q _UpperCAmelCase = stride_q _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return number | (1 << position) def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return number & ~(1 << position) def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return number ^ (1 << position) def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return ((number >> position) & 1) == 1 def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** 
(len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( A : List[str] ): '''simple docstring''' _UpperCAmelCase = tmp_path / 'file.csv' _UpperCAmelCase = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def UpperCAmelCase ( A : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path / 'malformed_file.csv' _UpperCAmelCase = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def UpperCAmelCase ( A : Any , A : int ): '''simple docstring''' _UpperCAmelCase = tmp_path / 'csv_with_image.csv' _UpperCAmelCase = textwrap.dedent( f'\\n image\n {image_file}\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = tmp_path / 'csv_with_label.csv' _UpperCAmelCase = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) @pytest.fixture def UpperCAmelCase ( A : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = tmp_path / 'csv_with_int_list.csv' _UpperCAmelCase = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(A , 'w' ) as f: f.write(A ) return str(A ) def UpperCAmelCase ( A : Dict , A : Dict , A : str ): '''simple docstring''' _UpperCAmelCase = Csv() _UpperCAmelCase = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(A , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(A ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' with open(A , 
encoding='utf-8' ) as f: _UpperCAmelCase = f.read().splitlines()[1] _UpperCAmelCase = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) _UpperCAmelCase = csv._generate_tables([[csv_file_with_image]] ) _UpperCAmelCase = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() _UpperCAmelCase = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( A : Dict ): '''simple docstring''' with open(A , encoding='utf-8' ) as f: _UpperCAmelCase = f.read().splitlines()[1:] _UpperCAmelCase = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) _UpperCAmelCase = csv._generate_tables([[csv_file_with_label]] ) _UpperCAmelCase = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() _UpperCAmelCase = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(A ) for label in labels] def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda A : [int(A ) for i in x.split()]} ) _UpperCAmelCase = csv._generate_tables([[csv_file_with_int_list]] ) _UpperCAmelCase = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) _UpperCAmelCase = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = 
layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase 
= ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def UpperCAmelCase ( A : Dict ): '''simple docstring''' _UpperCAmelCase = filter(lambda A : p.requires_grad , model.parameters() ) _UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowercase = logging.getLogger(__name__) def UpperCAmelCase ( A : str , A : int ): '''simple docstring''' if metric == "rouge2": _UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": _UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": _UpperCAmelCase = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": _UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' 
) _UpperCAmelCase = ModelCheckpoint( dirpath=A , filename=A , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ): '''simple docstring''' return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=A , verbose=A , ) class lowercase__ ( pl.Callback ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = {f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(snake_case ) @rank_zero_only def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=True ) -> None: logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _UpperCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results _UpperCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": _UpperCAmelCase = od / 'test_results.txt' _UpperCAmelCase = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_UpperCAmelCase = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _UpperCAmelCase = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=snake_case ) generations_file.parent.mkdir(exist_ok=snake_case ) with open(snake_case , 'a+' ) as writer: for key in sorted(snake_case ): if key in ["log", "progress_bar", "preds"]: continue _UpperCAmelCase = metrics[key] if isinstance(snake_case , torch.Tensor ): _UpperCAmelCase = val.item() _UpperCAmelCase = f'{key}: {val:.6f}\n' writer.write(snake_case ) if not save_generations: return if "preds" in metrics: _UpperCAmelCase = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(snake_case ) @rank_zero_only def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[Any]: try: _UpperCAmelCase = pl_module.model.model.num_parameters() except AttributeError: _UpperCAmelCase = pl_module.model.num_parameters() _UpperCAmelCase = count_trainable_parameters(snake_case ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def lowerCamelCase_ ( self , snake_case , snake_case ) -> int: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(snake_case , snake_case , 'test' ) @rank_zero_only def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
1
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase = ( '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py''' ) lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = 'https://pypi.org/pypi/diffusers/json' _UpperCAmelCase = json.loads(request.urlopen(A ).read() )['releases'].keys() return sorted(A , key=lambda A : version.Version(A ) ) def UpperCAmelCase ( ): '''simple docstring''' if HF_MODULES_CACHE in sys.path: return sys.path.append(A ) os.makedirs(A , exist_ok=A ) _UpperCAmelCase = Path(A ) / '__init__.py' if not init_path.exists(): init_path.touch() def UpperCAmelCase ( A : Union[str, os.PathLike] ): '''simple docstring''' init_hf_modules() _UpperCAmelCase = Path(A ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(A , exist_ok=A ) _UpperCAmelCase = dynamic_module_path / '__init__.py' if not init_path.exists(): init_path.touch() def UpperCAmelCase ( A : List[Any] ): '''simple docstring''' with open(A , 'r' , encoding='utf-8' ) as f: _UpperCAmelCase = f.read() # Imports of the form `import .xxx` _UpperCAmelCase = re.findall('^\s*import\s+\.(\S+)\s*$' , A , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , A , flags=re.MULTILINE ) # Unique-ify return list(set(A ) ) def UpperCAmelCase ( A : List[str] ): '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = [module_file] _UpperCAmelCase = [] # Let's recurse through all relative imports while not no_change: _UpperCAmelCase = [] for f in files_to_check: new_imports.extend(get_relative_imports(A ) ) _UpperCAmelCase = Path(A ).parent _UpperCAmelCase = [str(module_path / m ) for m in new_imports] _UpperCAmelCase = [f for f in new_import_files if f not in all_relative_imports] _UpperCAmelCase = [f'{f}.py' for f in new_import_files] _UpperCAmelCase = len(A ) == 0 all_relative_imports.extend(A ) return all_relative_imports def UpperCAmelCase ( A : str ): '''simple docstring''' with open(A , 'r' , encoding='utf-8' ) as f: _UpperCAmelCase = f.read() # Imports of the form `import xxx` _UpperCAmelCase = re.findall('^\s*import\s+(\S+)\s*$' , A , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('^\s*from\s+(\S+)\s+import' , A , flags=re.MULTILINE ) # Only keep the top-level module _UpperCAmelCase = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' 
)] # Unique-ify and test we got them all _UpperCAmelCase = list(set(A ) ) _UpperCAmelCase = [] for imp in imports: try: importlib.import_module(A ) except ImportError: missing_packages.append(A ) if len(A ) > 0: raise ImportError( 'This modeling file requires the following packages that were not found in your environment: ' f'{", ".join(A )}. Run `pip install {" ".join(A )}`' ) return get_relative_imports(A ) def UpperCAmelCase ( A : Dict , A : Tuple ): '''simple docstring''' _UpperCAmelCase = module_path.replace(os.path.sep , '.' ) _UpperCAmelCase = importlib.import_module(A ) if class_name is None: return find_pipeline_class(A ) return getattr(A , A ) def UpperCAmelCase ( A : Tuple ): '''simple docstring''' from ..pipelines import DiffusionPipeline _UpperCAmelCase = dict(inspect.getmembers(A , inspect.isclass ) ) _UpperCAmelCase = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , A ) and cls.__module__.split('.' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' ) _UpperCAmelCase = cls return pipeline_class def UpperCAmelCase ( A : Union[str, os.PathLike] , A : str , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , ): '''simple docstring''' _UpperCAmelCase = str(A ) _UpperCAmelCase = os.path.join(A , A ) if os.path.isfile(A ): _UpperCAmelCase = module_file_or_url _UpperCAmelCase = 'local' elif pretrained_model_name_or_path.count('/' ) == 0: _UpperCAmelCase = get_diffusers_versions() # cut ".dev0" _UpperCAmelCase = 'v' + '.'.join(__version__.split('.' 
)[:3] ) # retrieve github version that matches if revision is None: _UpperCAmelCase = latest_version if latest_version[1:] in available_versions else 'main' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: _UpperCAmelCase = f'v{revision}' elif revision == "main": _UpperCAmelCase = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub _UpperCAmelCase = COMMUNITY_PIPELINES_URL.format(revision=A , pipeline=A ) try: _UpperCAmelCase = cached_download( A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , ) _UpperCAmelCase = 'git' _UpperCAmelCase = pretrained_model_name_or_path + '.py' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached _UpperCAmelCase = hf_hub_download( A , A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , ) _UpperCAmelCase = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment _UpperCAmelCase = check_imports(A ) # Now we move the module inside our cached dynamic modules. _UpperCAmelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(A ) _UpperCAmelCase = Path(A ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). 
# The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(A , submodule_path / module_file ) for module_needed in modules_needed: _UpperCAmelCase = f'{module_needed}.py' shutil.copy(os.path.join(A , A ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(A , A ): _UpperCAmelCase = use_auth_token elif use_auth_token is True: _UpperCAmelCase = HfFolder.get_token() else: _UpperCAmelCase = None _UpperCAmelCase = model_info(A , revision=A , token=A ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. _UpperCAmelCase = submodule_path / commit_hash _UpperCAmelCase = full_submodule + os.path.sep + commit_hash create_dynamic_module(A ) if not (submodule_path / module_file).exists(): shutil.copy(A , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( A , f'{module_needed}.py' , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , ) return os.path.join(A , A ) def UpperCAmelCase ( A : Union[str, os.PathLike] , A : str , A : Optional[str] = None , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , **A : Dict , ): '''simple docstring''' _UpperCAmelCase = get_cached_module_file( A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , ) return get_class_in_module(A , final_module.replace('.py' , '' ) )
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , 
batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french 
model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase = logging.get_logger(__name__) lowercase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', 
'''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowercase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase ( A : Optional[Any] , A : Optional[Any] , A : List[str] , A : Dict , A : Tuple ): '''simple docstring''' for attribute in key.split('.' ): _UpperCAmelCase = getattr(A , A ) if weight_type is not None: _UpperCAmelCase = getattr(A , A ).shape else: _UpperCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _UpperCAmelCase = value elif weight_type == "weight_g": _UpperCAmelCase = value elif weight_type == "weight_v": _UpperCAmelCase = value elif weight_type == "bias": _UpperCAmelCase = value elif weight_type == "running_mean": _UpperCAmelCase = value elif weight_type == "running_var": _UpperCAmelCase = value elif weight_type == "num_batches_tracked": _UpperCAmelCase = value elif weight_type == "inv_freq": _UpperCAmelCase = value else: _UpperCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def UpperCAmelCase ( A : Any , A : List[Any] , A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = fairseq_model.state_dict() _UpperCAmelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) _UpperCAmelCase = True else: for key, mapped_key in MAPPING.items(): _UpperCAmelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _UpperCAmelCase = True if "*" in mapped_key: _UpperCAmelCase = name.split(A )[0].split('.' )[-2] _UpperCAmelCase = mapped_key.replace('*' , A ) if "pos_bias_u" in name: _UpperCAmelCase = None elif "pos_bias_v" in name: _UpperCAmelCase = None elif "weight_g" in name: _UpperCAmelCase = 'weight_g' elif "weight_v" in name: _UpperCAmelCase = 'weight_v' elif "bias" in name: _UpperCAmelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj _UpperCAmelCase = 'weight' elif "running_mean" in name: _UpperCAmelCase = 'running_mean' elif "inv_freq" in name: _UpperCAmelCase = 'inv_freq' elif "running_var" in name: _UpperCAmelCase = 'running_var' elif "num_batches_tracked" in name: _UpperCAmelCase = 'num_batches_tracked' else: _UpperCAmelCase = None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(f'Unused weights: {unused_weights}' ) def UpperCAmelCase ( A : Dict , A : List[Any] , A : List[Any] , A : Dict , A : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = full_name.split('conv_layers.' )[-1] _UpperCAmelCase = name.split('.' 
) _UpperCAmelCase = int(items[0] ) _UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(A ) @torch.no_grad() def UpperCAmelCase ( A : List[str] , A : Dict , A : Dict=None , A : Optional[Any]=None , A : int=True ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = WavaVecaConformerConfig.from_pretrained(A , hidden_act='swish' ) else: _UpperCAmelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: _UpperCAmelCase = 'rotary' if is_finetuned: if dict_path: _UpperCAmelCase = Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCAmelCase = target_dict.pad_index _UpperCAmelCase = target_dict.bos_index _UpperCAmelCase = target_dict.eos_index _UpperCAmelCase = len(target_dict.symbols ) _UpperCAmelCase = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) _UpperCAmelCase = target_dict.indices # fairseq has the <pad> and <s> switched _UpperCAmelCase = 0 _UpperCAmelCase = 1 with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(A , A ) _UpperCAmelCase = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) _UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False _UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) _UpperCAmelCase = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) _UpperCAmelCase = WavaVecaConformerForCTC(A ) else: _UpperCAmelCase = WavaVecaConformerForPreTraining(A ) if is_finetuned: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _UpperCAmelCase = 
argparse.Namespace(task='audio_pretraining' ) _UpperCAmelCase = fairseq.tasks.setup_task(A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A ) _UpperCAmelCase = model[0].eval() recursively_load_weights(A , A , not is_finetuned ) hf_wavavec.save_pretrained(A ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowercase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = 
self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) 
_UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
24
1
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' _UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw ).convert('RGB' ) return image def UpperCAmelCase ( A : Any ): '''simple docstring''' _UpperCAmelCase = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') ) 
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def UpperCAmelCase ( A : Dict , A : str , A : List[Any] ): '''simple docstring''' _UpperCAmelCase = dct.pop(A ) _UpperCAmelCase = val def UpperCAmelCase ( A : Optional[Any] , A : Optional[Any] ): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _UpperCAmelCase = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' ) _UpperCAmelCase = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict _UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(A , requires_grad=A ), v_bias) ) _UpperCAmelCase = qkv_bias def UpperCAmelCase ( A : Optional[Any] , A : List[Any] ): '''simple docstring''' _UpperCAmelCase = 364 if 'coco' in model_name else 224 _UpperCAmelCase = BlipaVisionConfig(image_size=A ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 
# models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        _UpperCAmelCase = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=A).to_dict()
    elif "opt-6.7b" in model_name:
        _UpperCAmelCase = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=A).to_dict()
    elif "t5-xl" in model_name:
        _UpperCAmelCase = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        _UpperCAmelCase = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    # Combine the vision config with the chosen text config into one BLIP-2 config.
    _UpperCAmelCase = BlipaConfig(vision_config=A, text_config=A)
    return config, image_size


@torch.no_grad()
def UpperCAmelCase(A: str, A: List[Any] = None, A: Any = False):
    '''Convert a LAVIS BLIP-2 checkpoint to the Hugging Face format.

    Loads the original model, renames/copies its state dict into a freshly built
    ``BlipaForConditionalGeneration``, checks logits against the original on a demo
    image, runs a caption-generation smoke test, then optionally saves and pushes.

    NOTE(review): obfuscation collapsed distinct variables onto ``A`` and
    ``_UpperCAmelCase`` (the duplicated ``A`` parameters are a SyntaxError as-is);
    later reads of ``tokenizer``, ``hf_model``, ``state_dict`` etc. refer to the
    original, distinct bindings — restore before running.
    '''
    # Tokenizer matches the language backbone (OPT vs Flan-T5).
    _UpperCAmelCase = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    _UpperCAmelCase = tokenizer('\n', add_special_tokens=A).input_ids[0]
    _UpperCAmelCase, _UpperCAmelCase = get_blipa_config(A, eos_token_id=A)
    _UpperCAmelCase = BlipaForConditionalGeneration(A).eval()
    # Map HF model names to the (LAVIS model name, model type) used by load_model_and_preprocess.
    _UpperCAmelCase = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    _UpperCAmelCase, _UpperCAmelCase = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    _UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
    _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = load_model_and_preprocess(
        name=A, model_type=A, is_eval=A, device=A
    )
    original_model.eval()
    print('Done!')
    # update state dict keys
    _UpperCAmelCase = original_model.state_dict()
    _UpperCAmelCase = create_rename_keys(A)
    for src, dest in rename_keys:
        rename_key(A, A, A)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        _UpperCAmelCase = state_dict.pop(A)
        if key.startswith('Qformer.bert'):
            _UpperCAmelCase = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            _UpperCAmelCase = key.replace('self', 'attention')
        if "opt_proj" in key:
            _UpperCAmelCase = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            _UpperCAmelCase = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            _UpperCAmelCase = key.replace('opt', 'language')
        if key.startswith('t5'):
            _UpperCAmelCase = key.replace('t5', 'language')
        _UpperCAmelCase = val
    # read in qv biases
    read_in_q_v_bias(A, A)
    _UpperCAmelCase, _UpperCAmelCase = hf_model.load_state_dict(A, strict=A)
    assert len(A) == 0
    # position_ids is the only buffer expected to be absent from the original checkpoint.
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    _UpperCAmelCase = load_demo_image()
    _UpperCAmelCase = vis_processors['eval'](A).unsqueeze(0).to(A)
    _UpperCAmelCase = tokenizer(['\n'], return_tensors='pt').input_ids.to(A)
    # create processor
    _UpperCAmelCase = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=A, image_std=A
    )
    _UpperCAmelCase = BlipaProcessor(image_processor=A, tokenizer=A)
    _UpperCAmelCase = processor(images=A, return_tensors='pt').pixel_values.to(A)
    # make sure processor creates exact same pixel values
    assert torch.allclose(A, A)
    original_model.to(A)
    hf_model.to(A)
    with torch.no_grad():
        if "opt" in model_name:
            _UpperCAmelCase = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            _UpperCAmelCase = hf_model(A, A).logits
        else:
            _UpperCAmelCase = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}
            ).logits
            # -100 marks padding positions to be ignored by the loss.
            _UpperCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            _UpperCAmelCase = hf_model(A, A, labels=A).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        _UpperCAmelCase = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=A
        )
        assert torch.allclose(logits[0, :3, :3], A, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        _UpperCAmelCase = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=A
        )
    else:
        # cast to same type
        _UpperCAmelCase = logits.dtype
        assert torch.allclose(original_logits.to(A), A, atol=1e-2)
    print('Looks ok!')
    print('Generating a caption...')
    _UpperCAmelCase = ''
    _UpperCAmelCase = tokenizer(A, return_tensors='pt').input_ids.to(A)
    _UpperCAmelCase = original_model.generate({'image': original_pixel_values})
    _UpperCAmelCase = hf_model.generate(
        A,
        A,
        do_sample=A,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print('Original generation:', A)
    # Strip the prompt tokens before decoding the HF output.
    _UpperCAmelCase = input_ids.shape[1]
    _UpperCAmelCase = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=A)
    _UpperCAmelCase = [text.strip() for text in output_text]
    print('HF generation:', A)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(A)
        hf_model.save_pretrained(A)
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}')
        hf_model.push_to_hub(f'nielsr/{model_name}')


if __name__ == "__main__":
    lowercase = argparse.ArgumentParser()
    lowercase = [
        '''blip2-opt-2.7b''',
        '''blip2-opt-6.7b''',
        '''blip2-opt-2.7b-coco''',
        '''blip2-opt-6.7b-coco''',
        '''blip2-flan-t5-xl''',
        '''blip2-flan-t5-xl-coco''',
        '''blip2-flan-t5-xxl''',
    ]
    parser.add_argument(
        '''--model_name''',
        default='''blip2-opt-2.7b''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    lowercase = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
24
"""Digit-sum implementations (iterative, recursive, compact) with a benchmark.

Fix: the obfuscated source named every function ``UpperCAmelCase``, so the later
definitions clobbered the earlier ones and the benchmark referenced
``sum_of_digits`` / ``sum_of_digits_recursion`` / ``sum_of_digits_compact``
which did not exist. The proper names are restored here.
"""


def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored), iteratively.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    >>> sum_of_digits(0)
    0
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the digit sum of ``n`` (sign ignored), via recursion.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    # Base case: a single digit is its own digit sum.
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the digit sum of ``n`` via string conversion.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three digit-sum implementations on a few sample values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit runs the call by name via __main__, so this only works when the
        # module is executed as a script.
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
24
1
"""Lazy import structure for the EfficientFormer model (transformers-style __init__).

NOTE(review): obfuscation collapsed several distinct module-level names onto
``lowercase``; upstream these are the ``_import_structure`` dict and its list
extensions, which the ``_LazyModule`` call at the bottom still references — verify
before running.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Base import structure: submodule -> exported names. Extended below when the
# optional vision / torch / tf dependencies are importable.
lowercase = {
    '''configuration_efficientformer''': [
        '''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''EfficientFormerConfig''',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''EfficientFormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = [
        '''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''EfficientFormerForImageClassification''',
        '''EfficientFormerForImageClassificationWithTeacher''',
        '''EfficientFormerModel''',
        '''EfficientFormerPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = [
        '''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFEfficientFormerForImageClassification''',
        '''TFEfficientFormerForImageClassificationWithTeacher''',
        '''TFEfficientFormerModel''',
        '''TFEfficientFormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime the module is lazy.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""Generate all k-element combinations of the integers 1..n.

Fix: the obfuscated source named all three functions ``UpperCAmelCase`` (so they
clobbered each other and the internal calls to ``create_all_state`` failed), and
the ``__main__`` guard assigned everything to ``lowercase`` while reading
``n``/``k``/``total_list``. Proper names are restored.
"""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of 1..n, in lexicographic order.

    >>> generate_all_combinations(4, 2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursively build combinations; record a snapshot when ``level`` reaches 0."""
    if level == 0:
        # Copy: current_list is mutated as the recursion backtracks.
        total_list.append(current_list[:])
        return
    # Upper bound leaves room for the remaining ``level - 1`` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print one combination per line, values space-separated."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
24
1
"""Convert an original Latent Diffusion checkpoint into a diffusers ``LDMPipeline``."""
import argparse

import OmegaConf
import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def UpperCAmelCase(A: Dict, A: str, A: Union[str, Any]):
    '''Load the LDM config and checkpoint, split the state dict into its VQVAE and
    UNet parts, rebuild the corresponding diffusers modules, and save the assembled
    pipeline to the output path.

    NOTE(review): obfuscation reuses ``A`` and ``_UpperCAmelCase`` for several
    distinct bindings (the duplicated parameters are a SyntaxError as-is); reads of
    ``state_dict``, ``keys``, ``config``, ``vqvae``, ``unet``, ``pipeline`` below
    refer to the original distinct names — restore before running.
    '''
    _UpperCAmelCase = OmegaConf.load(A)  # parsed training config (YAML)
    _UpperCAmelCase = torch.load(A, map_location='cpu')['model']  # raw weights
    _UpperCAmelCase = list(state_dict.keys())
    # extract state_dict for VQVAE
    _UpperCAmelCase = {}
    _UpperCAmelCase = 'first_stage_model.'
    for key in keys:
        if key.startswith(A):
            _UpperCAmelCase = state_dict[key]
    # extract state_dict for UNetLDM
    _UpperCAmelCase = {}
    _UpperCAmelCase = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(A):
            _UpperCAmelCase = state_dict[key]
    _UpperCAmelCase = config.model.params.first_stage_config.params
    _UpperCAmelCase = config.model.params.unet_config.params
    _UpperCAmelCase = VQModel(**A).eval()
    vqvae.load_state_dict(A)
    _UpperCAmelCase = UNetLDMModel(**A).eval()
    unet.load_state_dict(A)
    # Scheduler reproduces the original training noise schedule.
    _UpperCAmelCase = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=A,
    )
    _UpperCAmelCase = LDMPipeline(A, A, A)
    pipeline.save_pretrained(A)


if __name__ == "__main__":
    lowercase = argparse.ArgumentParser()
    parser.add_argument('''--checkpoint_path''', type=str, required=True)
    parser.add_argument('''--config_path''', type=str, required=True)
    parser.add_argument('''--output_path''', type=str, required=True)
    lowercase = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
24
"""Tests for the seq2seq ``run_eval.py`` / ``run_eval_search.py`` example scripts."""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS

logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()


def UpperCAmelCase(A: Path, A: list):
    '''Write the given articles to ``path``, one per line.

    NOTE(review): obfuscation reuses ``A`` for both parameters (SyntaxError as-is);
    upstream this is ``_dump_articles(path, articles)``.
    '''
    _UpperCAmelCase = '\n'.join(A)
    Path(A).open('w').writelines(A)


# Tiny checkpoints keep the tests fast.
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class lowercase__(A):
    '''End-to-end tests driving run_generate / run_search via patched sys.argv.

    NOTE(review): the obfuscated base class ``A`` and repeated ``lowerCamelCase_``
    method names clobber each other; upstream this derives from ``TestCasePlus``
    with distinctly named test methods.
    '''

    def lowerCamelCase_(self, snake_case) -> str:
        # Shared helper: run run_generate on a one-line article and check output exists.
        _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        _UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(snake_case, snake_case)
        _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _UpperCAmelCase = f'\n        run_eval_search.py\n        {model}\n        {input_file_name}\n        {output_file_name}\n        --score_path {score_path}\n        --task {task}\n        --num_beams 2\n        --length_penalty 2.0\n    '.split()
        with patch.object(snake_case, 'argv', snake_case):
            run_generate()
            assert Path(snake_case).exists()
            # os.remove(Path(output_file_name))

    def lowerCamelCase_(self) -> str:
        self.run_eval_tester(snake_case)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def lowerCamelCase_(self, snake_case) -> List[Any]:
        self.run_eval_tester(snake_case)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def lowerCamelCase_(self, snake_case) -> Dict:
        # Exercises run_search over a small beam/length-penalty grid.
        _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        _UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _UpperCAmelCase = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir())
        _UpperCAmelCase = str(tmp_dir / 'scores.json')
        _UpperCAmelCase = str(tmp_dir / 'val.target')
        _dump_articles(snake_case, text['en'])
        _dump_articles(snake_case, text['de'])
        _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _UpperCAmelCase = f'\n        run_eval_search.py\n        {str(snake_case )}\n        {str(snake_case )}\n        --score_path {score_path}\n        --reference_path {reference_path}\n        --task {task}\n    '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
        with patch.object(snake_case, 'argv', snake_case):
            with CaptureStdout() as cs:
                run_search()
            # The grid-search report must mention the searched params and best args...
            _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
            _UpperCAmelCase = ['Info']
            if "translation" in task:
                expected_strings.append('bleu')
            else:
                expected_strings.extend(snake_case)
            for w in expected_strings:
                assert w in cs.out
            # ...and must not contain stray log noise.
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(snake_case).exists()
            os.remove(Path(snake_case))
24
1
"""simple docstring""" from __future__ import annotations from random import random class lowercase__ : '''simple docstring''' def __init__( self , snake_case = None ) -> List[str]: _UpperCAmelCase = value _UpperCAmelCase = random() _UpperCAmelCase = None _UpperCAmelCase = None def __repr__( self ) -> str: from pprint import pformat if self.left is None and self.right is None: return f'\'{self.value}: {self.prior:.5}\'' else: return pformat( {f'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 ) def __str__( self ) -> str: _UpperCAmelCase = str(self.value ) + ' ' _UpperCAmelCase = str(self.left or '' ) _UpperCAmelCase = str(self.right or '' ) return value + left + right def UpperCAmelCase ( A : Node | None , A : int ): '''simple docstring''' if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: _UpperCAmelCase , _UpperCAmelCase = split(root.left , A ) return left, root else: _UpperCAmelCase , _UpperCAmelCase = split(root.right , A ) return root, right def UpperCAmelCase ( A : Node | None , A : Node | None ): '''simple docstring''' if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: _UpperCAmelCase = merge(left.right , A ) return left else: _UpperCAmelCase = merge(A , right.left ) return right def UpperCAmelCase ( A : Node | None , A : int ): '''simple docstring''' _UpperCAmelCase = Node(A ) _UpperCAmelCase , _UpperCAmelCase = split(A , A ) return merge(merge(A , A ) , A ) def UpperCAmelCase ( A : Node | None , A : int ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = split(A , value - 1 ) _UpperCAmelCase , _UpperCAmelCase = split(A , A ) return merge(A , A ) def UpperCAmelCase ( A : Node | None ): '''simple docstring''' if not root: # None return else: inorder(root.left ) print(root.value , end=',' ) inorder(root.right ) def UpperCAmelCase ( A : Node | None , A : str ): '''simple 
docstring''' for arg in args.split(): if arg[0] == "+": _UpperCAmelCase = insert(A , int(arg[1:] ) ) elif arg[0] == "-": _UpperCAmelCase = erase(A , int(arg[1:] ) ) else: print('Unknown command' ) return root def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) _UpperCAmelCase = input() while args != "q": _UpperCAmelCase = interact_treap(A , A ) print(A ) _UpperCAmelCase = input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
24
"""Top-level ``interleave_datasets`` / ``concatenate_datasets`` combinators for 🤗 datasets."""
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)


def UpperCAmelCase(
    A: List[DatasetType],
    A: Optional[List[float]] = None,
    A: Optional[int] = None,
    A: Optional[DatasetInfo] = None,
    A: Optional[NamedSplit] = None,
    A: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
    '''Interleave several datasets of the same kind into one.

    Validates that all inputs are either all map-style ``Dataset`` or all
    ``IterableDataset`` (rejecting dict-style and mixed inputs with targeted
    errors), then dispatches to the matching private interleave implementation.

    NOTE(review): obfuscation collapsed the distinct parameters
    (datasets, probabilities, seed, info, split, stopping_strategy) onto ``A``
    (duplicate parameters are a SyntaxError as-is) — restore before running.
    '''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(A):
        if not isinstance(A, (Dataset, IterableDataset)):
            if isinstance(A, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(A )}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']'
                )
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.'
            )
        if i == 0:
            # Lock the expected type to whatever the first element is.
            _UpperCAmelCase, _UpperCAmelCase = (
                (Dataset, IterableDataset) if isinstance(A, A) else (IterableDataset, Dataset)
            )
        elif not isinstance(A, A):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.'
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.')
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            A, A, A, info=A, split=A, stopping_strategy=A
        )
    else:
        return _interleave_iterable_datasets(
            A, A, A, info=A, split=A, stopping_strategy=A
        )


def UpperCAmelCase(
    A: List[DatasetType],
    A: Optional[DatasetInfo] = None,
    A: Optional[NamedSplit] = None,
    A: int = 0,
):
    '''Concatenate several datasets of the same kind along ``axis``.

    Same validation scheme as the interleave function above, then dispatches to
    the map-style or iterable concatenation implementation.

    NOTE(review): obfuscated parameters are upstream (dsets, info, split, axis).
    '''
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(A):
        if not isinstance(A, (Dataset, IterableDataset)):
            if isinstance(A, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.'
                    )
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(A )}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']'
                )
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.'
            )
        if i == 0:
            _UpperCAmelCase, _UpperCAmelCase = (
                (Dataset, IterableDataset) if isinstance(A, A) else (IterableDataset, Dataset)
            )
        elif not isinstance(A, A):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.'
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(A, info=A, split=A, axis=A)
    else:
        return _concatenate_iterable_datasets(A, info=A, split=A, axis=A)
24
1
"""IndicGLUE evaluation metric for 🤗 datasets.

Fixes restored from the obfuscated source: ``from sklearn.metrics import fa_score``
(no such name — upstream is ``f1_score``); the three helper functions all shared one
mangled name while the metric class called ``simple_accuracy`` / ``acc_and_fa`` /
``precision_at_10``; the three module docstring constants were all assigned to
``lowercase`` while the class decorator read ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``;
and ``_compute`` had duplicate ``snake_case`` parameters (a SyntaxError).
"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets

_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
'''

_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''

_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0}

    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'accuracy\': 1.0, \'f1\': 1.0}

    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'precision@10\': 1.0}

'''


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """Accuracy together with binary F1 score."""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval via cosine similarity."""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(in_sentvecs, en_sentvecs, 'cosine')
    actual = np.array(range(n))
    # A hit when the true pair index appears among the 10 nearest neighbours.
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase__(datasets.Metric):
    '''IndicGLUE metric: dispatches per config to accuracy, acc+F1, or precision@10.'''

    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr works on sentence vectors; every other config on int labels.
                    'predictions': datasets.Value('int64')
                    if self.config_name != 'cvit-mkb-clsr'
                    else datasets.Sequence(datasets.Value('float32')),
                    'references': datasets.Value('int64')
                    if self.config_name != 'cvit-mkb-clsr'
                    else datasets.Sequence(datasets.Value('float32')),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy' if self.config_name != 'cvit-mkb-clsr' else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
24
"""Pipeline tests for text2text-generation (encoder-decoder models)."""
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TextaTextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class lowercase__(unittest.TestCase):
    '''Shape/behaviour tests for the text2text-generation pipeline.

    NOTE(review): obfuscation reuses ``lowerCamelCase_`` for every method and
    ``_UpperCAmelCase`` for distinct locals and the two class attributes below;
    the reads of ``outputs`` / ``generator`` refer to the original bindings.
    '''

    # Model mappings the pipeline test mixin draws candidate architectures from.
    _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def lowerCamelCase_(self, snake_case, snake_case, snake_case) -> Dict:
        # Pipeline factory used by the common test mixin.
        _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case, tokenizer=snake_case)
        return generator, ["Something to write", "Something else"]

    def lowerCamelCase_(self, snake_case, snake_case) -> Dict:
        _UpperCAmelCase = generator('Something there')
        self.assertEqual(snake_case, [{'generated_text': ANY(snake_case)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
        _UpperCAmelCase = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=snake_case)
        self.assertEqual(
            snake_case,
            [
                [{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
                [{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
            ],
        )
        _UpperCAmelCase = generator(
            ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=snake_case
        )
        self.assertEqual(
            snake_case,
            [
                [{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
                [{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
            ],
        )
        # Non-string input must raise.
        with self.assertRaises(snake_case):
            generator(4)

    @require_torch
    def lowerCamelCase_(self) -> Dict:
        _UpperCAmelCase = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt')
        # do_sample=False necessary for reproducibility
        _UpperCAmelCase = generator('Something there', do_sample=snake_case)
        self.assertEqual(snake_case, [{'generated_text': ''}])
        _UpperCAmelCase = 3
        _UpperCAmelCase = generator(
            'Something there',
            num_return_sequences=snake_case,
            num_beams=snake_case,
        )
        _UpperCAmelCase = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(snake_case, snake_case)
        _UpperCAmelCase = generator('This is a test', do_sample=snake_case, num_return_sequences=2, return_tensors=snake_case)
        self.assertEqual(
            snake_case,
            [
                {'generated_token_ids': ANY(torch.Tensor)},
                {'generated_token_ids': ANY(torch.Tensor)},
            ],
        )
        # Force pad == eos so batched generation is reproducible.
        _UpperCAmelCase = generator.model.config.eos_token_id
        _UpperCAmelCase = '<pad>'
        _UpperCAmelCase = generator(
            ['This is a test', 'This is a second test'],
            do_sample=snake_case,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=snake_case,
        )
        self.assertEqual(
            snake_case,
            [
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def lowerCamelCase_(self) -> Any:
        _UpperCAmelCase = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf')
        # do_sample=False necessary for reproducibility
        _UpperCAmelCase = generator('Something there', do_sample=snake_case)
        self.assertEqual(snake_case, [{'generated_text': ''}])
24
1
"""Tests for the framework-agnostic tensor utilities in ``transformers.utils``
(flatten_dict, transpose, reshape, squeeze, expand_dims) across numpy / torch / tf / jax.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)

if is_flax_available():
    import jax.numpy as jnp
if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch


class lowercase__(unittest.TestCase):
    '''Each op is checked against the numpy reference, per framework.

    NOTE(review): obfuscation reuses ``lowerCamelCase_`` for every method and
    ``_UpperCAmelCase`` for distinct locals (array and framework tensor); the
    original methods have distinct names (test_flatten_dict, test_transpose, ...).
    '''

    def lowerCamelCase_(self) -> Dict:
        # flatten_dict: nested dicts become dot-joined keys.
        _UpperCAmelCase = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        _UpperCAmelCase = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(snake_case), snake_case)

    def lowerCamelCase_(self) -> List[str]:
        # transpose, numpy input
        _UpperCAmelCase = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(snake_case), x.transpose()))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(snake_case, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def lowerCamelCase_(self) -> List[str]:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(transpose(snake_case), transpose(snake_case).numpy()))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(transpose(snake_case, axes=(1, 2, 0)), transpose(snake_case, axes=(1, 2, 0)).numpy()))

    @require_tf
    def lowerCamelCase_(self) -> Union[str, Any]:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(transpose(snake_case), transpose(snake_case).numpy()))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(transpose(snake_case, axes=(1, 2, 0)), transpose(snake_case, axes=(1, 2, 0)).numpy()))

    @require_flax
    def lowerCamelCase_(self) -> Union[str, Any]:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(transpose(snake_case), np.asarray(transpose(snake_case))))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(transpose(snake_case, axes=(1, 2, 0)), np.asarray(transpose(snake_case, axes=(1, 2, 0)))))

    def lowerCamelCase_(self) -> List[str]:
        # reshape, numpy input
        _UpperCAmelCase = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(snake_case, (4, 3)), np.reshape(snake_case, (4, 3))))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(snake_case, (12, 5)), np.reshape(snake_case, (12, 5))))

    @require_torch
    def lowerCamelCase_(self) -> Optional[Any]:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(reshape(snake_case, (4, 3)), reshape(snake_case, (4, 3)).numpy()))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(reshape(snake_case, (12, 5)), reshape(snake_case, (12, 5)).numpy()))

    @require_tf
    def lowerCamelCase_(self) -> Union[str, Any]:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(reshape(snake_case, (4, 3)), reshape(snake_case, (4, 3)).numpy()))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(reshape(snake_case, (12, 5)), reshape(snake_case, (12, 5)).numpy()))

    @require_flax
    def lowerCamelCase_(self) -> Tuple:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(reshape(snake_case, (4, 3)), np.asarray(reshape(snake_case, (4, 3)))))
        _UpperCAmelCase = np.random.randn(3, 4, 5)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(reshape(snake_case, (12, 5)), np.asarray(reshape(snake_case, (12, 5)))))

    def lowerCamelCase_(self) -> str:
        # squeeze, numpy input
        _UpperCAmelCase = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(snake_case), np.squeeze(snake_case)))
        _UpperCAmelCase = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(snake_case, axis=2), np.squeeze(snake_case, axis=2)))

    @require_torch
    def lowerCamelCase_(self) -> Union[str, Any]:
        _UpperCAmelCase = np.random.randn(1, 3, 4)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(squeeze(snake_case), squeeze(snake_case).numpy()))
        _UpperCAmelCase = np.random.randn(1, 4, 1, 5)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(squeeze(snake_case, axis=2), squeeze(snake_case, axis=2).numpy()))

    @require_tf
    def lowerCamelCase_(self) -> Optional[int]:
        _UpperCAmelCase = np.random.randn(1, 3, 4)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(squeeze(snake_case), squeeze(snake_case).numpy()))
        _UpperCAmelCase = np.random.randn(1, 4, 1, 5)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(squeeze(snake_case, axis=2), squeeze(snake_case, axis=2).numpy()))

    @require_flax
    def lowerCamelCase_(self) -> str:
        _UpperCAmelCase = np.random.randn(1, 3, 4)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(squeeze(snake_case), np.asarray(squeeze(snake_case))))
        _UpperCAmelCase = np.random.randn(1, 4, 1, 5)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(squeeze(snake_case, axis=2), np.asarray(squeeze(snake_case, axis=2))))

    def lowerCamelCase_(self) -> Union[str, Any]:
        # expand_dims, numpy input
        _UpperCAmelCase = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(snake_case, axis=1), np.expand_dims(snake_case, axis=1)))

    @require_torch
    def lowerCamelCase_(self) -> Dict:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = torch.tensor(snake_case)
        self.assertTrue(np.allclose(expand_dims(snake_case, axis=1), expand_dims(snake_case, axis=1).numpy()))

    @require_tf
    def lowerCamelCase_(self) -> int:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = tf.constant(snake_case)
        self.assertTrue(np.allclose(expand_dims(snake_case, axis=1), expand_dims(snake_case, axis=1).numpy()))

    @require_flax
    def lowerCamelCase_(self) -> str:
        _UpperCAmelCase = np.random.randn(3, 4)
        _UpperCAmelCase = jnp.array(snake_case)
        self.assertTrue(np.allclose(expand_dims(snake_case, axis=1), np.asarray(expand_dims(snake_case, axis=1))))
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
24
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** 
(len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, 
RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = 
np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
24
1
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = XGLMTokenizer _UpperCAmelCase = XGLMTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> int: super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = XGLMTokenizer(snake_case , keep_accents=snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(snake_case ) , 1008 ) def lowerCamelCase_ ( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = XGLMTokenizer(snake_case , keep_accents=snake_case ) _UpperCAmelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( snake_case , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual( snake_case , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def lowerCamelCase_ ( self ) -> int: return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def lowerCamelCase_ ( self ) -> Optional[int]: with tempfile.NamedTemporaryFile() as f: shutil.copyfile(snake_case , f.name ) _UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=snake_case ) _UpperCAmelCase = pickle.dumps(snake_case ) pickle.loads(snake_case ) def lowerCamelCase_ ( self ) -> int: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' 
_UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = 'Hello World!' _UpperCAmelCase = [2, 31227, 4447, 35] self.assertListEqual(snake_case , self.big_tokenizer.encode(snake_case ) ) @slow def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off _UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(snake_case , self.big_tokenizer.encode(snake_case ) ) @slow def lowerCamelCase_ ( self ) -> str: # fmt: off _UpperCAmelCase = { 'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 
5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='facebook/xglm-564M' , padding=snake_case , )
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
1
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowercase = sys.version_info >= (3, 10) def UpperCAmelCase ( A : str=None , A : Optional[int]=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=A ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = field(default='''toto''', metadata={'''help''': '''help message'''} ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = None class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''titi''' _UpperCAmelCase = '''toto''' class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''titi''' _UpperCAmelCase = '''toto''' _UpperCAmelCase = 42 @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = "toto" def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = "toto" def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = None _UpperCAmelCase = field(default=A, metadata={'''help''': '''help message'''} ) _UpperCAmelCase = None _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[] ) @dataclass class lowercase__ : 
'''simple docstring''' _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[1, 2, 3] ) _UpperCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) _UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = field() _UpperCAmelCase = field() _UpperCAmelCase = field() def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = field() _UpperCAmelCase = None _UpperCAmelCase = field(default='''toto''', metadata={'''help''': '''help message'''} ) _UpperCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = None @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = None _UpperCAmelCase = field(default=A, metadata={'''help''': '''help message'''} ) _UpperCAmelCase = None _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[] ) class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[int]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(snake_case ).items() if k != 'container'} _UpperCAmelCase = {k: v for k, v in vars(snake_case ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , snake_case ) and yy.get('choices' , snake_case ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](snake_case ) , yy['type'](snake_case ) ) del xx["type"], yy["type"] self.assertEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> str: 
_UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=snake_case , required=snake_case ) expected.add_argument('--bar' , type=snake_case , required=snake_case ) expected.add_argument('--baz' , type=snake_case , required=snake_case ) expected.add_argument('--flag' , type=snake_case , default=snake_case , const=snake_case , nargs='?' ) self.argparsersEqual(snake_case , snake_case ) _UpperCAmelCase = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(snake_case , look_for_args_file=snake_case ) self.assertFalse(example.flag ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=snake_case ) expected.add_argument('--baz' , default='toto' , type=snake_case , help='help message' ) self.argparsersEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=snake_case , default=snake_case , const=snake_case , nargs='?' ) expected.add_argument('--baz' , type=snake_case , default=snake_case , const=snake_case , nargs='?' 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=snake_case , dest='baz' ) expected.add_argument('--opt' , type=snake_case , default=snake_case ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(snake_case ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(snake_case ) self.argparsersEqual(snake_case , snake_case ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) ) _UpperCAmelCase = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) ) _UpperCAmelCase = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) ) _UpperCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) ) _UpperCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(snake_case , snake_case ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _UpperCAmelCase = 
parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCamelCase_ ( self ) -> Optional[int]: @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = "toto" _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(snake_case , snake_case ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _UpperCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _UpperCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=snake_case ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=snake_case ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=snake_case ) self.argparsersEqual(snake_case , snake_case ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( snake_case , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(snake_case , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def lowerCamelCase_ ( self ) -> Optional[int]: 
_UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=snake_case , type=snake_case ) expected.add_argument('--bar' , default=snake_case , type=snake_case , help='help message' ) expected.add_argument('--baz' , default=snake_case , type=snake_case ) expected.add_argument('--ces' , nargs='+' , default=[] , type=snake_case ) expected.add_argument('--des' , nargs='+' , default=[] , type=snake_case ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(snake_case ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(snake_case ) self.argparsersEqual(snake_case , snake_case ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(snake_case , Namespace(foo=snake_case , bar=snake_case , baz=snake_case , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(snake_case , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=snake_case , required=snake_case ) expected.add_argument('--required_str' , type=snake_case , required=snake_case ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=snake_case , ) self.argparsersEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=snake_case , required=snake_case ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=snake_case , ) expected.add_argument('--opt' , type=snake_case , default=snake_case ) expected.add_argument('--baz' , default='toto' , 
type=snake_case , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case ) self.argparsersEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } _UpperCAmelCase = parser.parse_dict(snake_case )[0] _UpperCAmelCase = BasicExample(**snake_case ) self.assertEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 42, } self.assertRaises(snake_case , parser.parse_dict , snake_case , allow_extra_keys=snake_case ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(snake_case , 'temp_json' ) os.mkdir(snake_case ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(snake_case , snake_case ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] _UpperCAmelCase = BasicExample(**snake_case ) self.assertEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = HfArgumentParser(snake_case ) _UpperCAmelCase = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(snake_case , 'temp_yaml' ) os.mkdir(snake_case ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(snake_case , snake_case ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] _UpperCAmelCase = BasicExample(**snake_case ) self.assertEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = HfArgumentParser(snake_case ) self.assertIsNotNone(snake_case )
24
"""RoBERTa lazy import structure.

Registers configuration, tokenizer and modeling symbols with ``_LazyModule``
so heavy backends (torch / tf / flax) are only imported on first access.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public symbols it provides. Backend-specific
# entries are appended below only when that backend is importable.
# BUG FIX: this dict (and the guarded symbol lists) were previously bound to
# throwaway names while `_import_structure` was referenced undefined at the
# bottom of the file, raising NameError on import.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )
else:
    import sys

    # Lazily resolve attributes of this module through _LazyModule.
    lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""BEiT model configuration (plus its ONNX export configuration)."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

lowercase = logging.get_logger(__name__)

# NOTE(review): this rebinds `lowercase` (clobbering the logger above); kept
# as in the original because the final binding is the module's visible name.
lowercase = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class lowercase__(A):
    """Configuration for BEiT models (model type "beit").

    BUG FIX: every ``__init__`` parameter previously shared the single name
    ``snake_case`` (a SyntaxError) while the body read names that were never
    declared, and values were discarded into a rebound local. Parameter
    names below are reconstructed exactly from the body's reads and are now
    stored on ``self``.
    """

    _UpperCAmelCase = "beit"  # model_type identifier

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class lowercase__(A):
    """ONNX export configuration for BEiT.

    NOTE(review): this class shadows the config class above (both were
    generated with the name ``lowercase__``); kept unchanged.
    """

    _UpperCAmelCase = version.parse("1.11")

    @property
    def lowerCamelCase_(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis specification for the NCHW image input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    # NOTE(review): same generated name as above — this property shadows the
    # previous one, as in the original.
    @property
    def lowerCamelCase_(self) -> float:
        # Absolute tolerance for validating exported outputs.
        return 1e-4
24
1
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
1
"""Unit tests for accelerate's module hooks (pre/post-forward and device alignment).

NOTE(review): this module has been machine-obfuscated — locals are collapsed
to `_UpperCAmelCase`, several parameter lists duplicate the name
`snake_case` (a SyntaxError), `self.lineara` denotes two different layers and
`nn.BatchNormad` looks like a mangled `nn.BatchNorm1d`. The code is kept
byte-identical; only comments, docstrings and invalid annotations are fixed.
"""
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class lowercase__(nn.Module):
    """Tiny Linear->BatchNorm->Linear model used as the hook test fixture."""

    def __init__(self) -> None:
        super().__init__()
        # NOTE(review): obfuscation rebinds one local three times; the
        # intended submodules (read by forward) are never assigned to self.
        _UpperCAmelCase = nn.Linear(3, 4)
        _UpperCAmelCase = nn.BatchNormad(4)
        _UpperCAmelCase = nn.Linear(4, 5)

    def lowerCamelCase_(self, snake_case) -> torch.Tensor:
        # Forward pass: linear -> batchnorm -> linear.
        return self.lineara(self.batchnorm(self.lineara(snake_case)))


class lowercase__(A):
    """Hook that increments the first forward argument by 1 (pre-forward)."""

    # NOTE(review): duplicate parameter name `snake_case` below is a
    # SyntaxError in the obfuscated source; kept byte-identical.
    def lowerCamelCase_(self, snake_case, *snake_case, **snake_case) -> tuple:
        return (args[0] + 1,) + args[1:], kwargs


class lowercase__(A):
    """Hook that increments the forward output by 1 (post-forward)."""

    # NOTE(review): duplicate parameter name `snake_case` — SyntaxError in
    # the obfuscated source; kept byte-identical.
    def lowerCamelCase_(self, snake_case, snake_case):
        return output + 1


class lowercase__(unittest.TestCase):
    """Behavioral tests for add/remove/attach hook helpers."""

    def lowerCamelCase_(self) -> None:
        """Adding then removing a hook leaves the module untouched."""
        _UpperCAmelCase = ModelForTest()
        _UpperCAmelCase = ModelHook()
        add_hook_to_module(snake_case, snake_case)
        self.assertEqual(test_model._hf_hook, snake_case)
        self.assertTrue(hasattr(snake_case, '_old_forward'))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, 'forward')
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ['x'])
        remove_hook_from_module(snake_case)
        self.assertFalse(hasattr(snake_case, '_hf_hook'))
        self.assertFalse(hasattr(snake_case, '_old_forward'))

    def lowerCamelCase_(self) -> None:
        """append=True stacks a second hook instead of replacing the first."""
        _UpperCAmelCase = ModelForTest()
        _UpperCAmelCase = ModelHook()
        add_hook_to_module(snake_case, snake_case)
        add_hook_to_module(snake_case, snake_case, append=snake_case)
        self.assertEqual(isinstance(test_model._hf_hook, snake_case), snake_case)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(snake_case, '_old_forward'))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, 'forward')
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ['x'])
        remove_hook_from_module(snake_case)
        self.assertFalse(hasattr(snake_case, '_hf_hook'))
        self.assertFalse(hasattr(snake_case, '_old_forward'))

    def lowerCamelCase_(self) -> None:
        """Pre-forward hooks shift the input; re-adding replaces, SequentialHook chains."""
        _UpperCAmelCase = ModelForTest()
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = test_model(x + 1)
        _UpperCAmelCase = test_model(x + 2)
        _UpperCAmelCase = PreForwardHook()
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        self.assertTrue(torch.allclose(snake_case, snake_case, atol=1E-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        _UpperCAmelCase = PreForwardHook()
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        self.assertTrue(torch.allclose(snake_case, snake_case, atol=1E-5))
        # You need to use the sequential hook to chain two or more hooks
        _UpperCAmelCase = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        assert torch.allclose(snake_case, snake_case, atol=1E-5)

    def lowerCamelCase_(self) -> None:
        """Post-forward hooks shift the output; replace vs. SequentialHook chaining."""
        _UpperCAmelCase = ModelForTest()
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = test_model(snake_case)
        _UpperCAmelCase = PostForwardHook()
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        self.assertTrue(torch.allclose(snake_case, output + 1, atol=1E-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        _UpperCAmelCase = PostForwardHook()
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        self.assertTrue(torch.allclose(snake_case, output + 1, atol=1E-5))
        # You need to use the sequential hook to chain two or more hooks
        _UpperCAmelCase = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        assert torch.allclose(snake_case, output + 2, atol=1E-5)

    def lowerCamelCase_(self) -> None:
        """A hook with gradient tracking disabled yields outputs without requires_grad."""
        _UpperCAmelCase = ModelForTest()
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = test_model(snake_case)
        _UpperCAmelCase = PostForwardHook()
        add_hook_to_module(snake_case, snake_case)
        _UpperCAmelCase = test_model(snake_case)
        self.assertTrue(torch.allclose(snake_case, output + 1))
        self.assertTrue(outputa.requires_grad)
        _UpperCAmelCase = True
        _UpperCAmelCase = test_model(snake_case)
        self.assertFalse(outputa.requires_grad)

    @require_multi_gpu
    def lowerCamelCase_(self) -> None:
        """AlignDevicesHook moves submodules to separate GPUs; io_same_device aligns output."""
        _UpperCAmelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.lineara.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.lineara.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(snake_case, AlignDevicesHook(io_same_device=snake_case))
        _UpperCAmelCase = torch.randn(2, 3).to(0)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, torch.device(0))

    def lowerCamelCase_(self) -> None:
        """Per-module AlignDevicesHook with offload: params on meta, restored on removal."""
        _UpperCAmelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        _UpperCAmelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.lineara, AlignDevicesHook(**snake_case))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**snake_case))
        add_hook_to_module(model.lineara, AlignDevicesHook(**snake_case))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so are on the execution device
        _UpperCAmelCase = torch.device(hook_kwargs['execution_device'])
        self.assertEqual(model.batchnorm.running_mean.device, snake_case)
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, snake_case)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # Now test with buffers included in the offload
        _UpperCAmelCase = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.lineara, AlignDevicesHook(**snake_case))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**snake_case))
        add_hook_to_module(model.lineara, AlignDevicesHook(**snake_case))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, snake_case)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))

    def lowerCamelCase_(self) -> None:
        """attach_align_device_hook over the whole model: offload, forward, restore."""
        _UpperCAmelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        _UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(snake_case, execution_device=snake_case, offload=snake_case)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so are on the execution device
        _UpperCAmelCase = torch.device(snake_case)
        self.assertEqual(model.batchnorm.running_mean.device, snake_case)
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, snake_case)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(snake_case)
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # Now test with buffers included in the offload
        attach_align_device_hook(snake_case, execution_device=snake_case, offload=snake_case, offload_buffers=snake_case)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, snake_case)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(snake_case)
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))

    def lowerCamelCase_(self) -> None:
        """attach_align_device_hook with an explicit weights_map (state_dict offload)."""
        _UpperCAmelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        _UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            snake_case, execution_device=snake_case, offload=snake_case, weights_map=model.state_dict()
        )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so are on the execution device
        _UpperCAmelCase = torch.device(snake_case)
        self.assertEqual(model.batchnorm.running_mean.device, snake_case)
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, snake_case)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(snake_case)
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            snake_case,
            execution_device=snake_case,
            offload=snake_case,
            weights_map=model.state_dict(),
            offload_buffers=snake_case,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.lineara.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))
        _UpperCAmelCase = torch.randn(2, 3)
        _UpperCAmelCase = model(snake_case)
        self.assertEqual(output.device, snake_case)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(snake_case)
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.lineara.weight.device, torch.device('cpu'))
24
"""simple docstring""" from itertools import permutations def UpperCAmelCase ( A : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(A ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCAmelCase ( A : int = 10 ): '''simple docstring''' return sum( int(''.join(map(A , A ) ) ) for num in permutations(range(A ) ) if is_substring_divisible(A ) ) if __name__ == "__main__": print(F'''{solution() = }''')
24
1
"""Zero-shot image classification pipeline (CLIP-style image/text matching).

NOTE(review): this module has been machine-obfuscated — locals are collapsed
to `_UpperCAmelCase` and several parameter lists duplicate the name
`snake_case` (a SyntaxError); many names the bodies read (`kwargs`, `image`,
`inputs`, `model_inputs`, `logits`, `probs`, `scores`, `result`, `x`) are
never bound. Code is kept byte-identical; only comments, docstrings and
invalid annotations are changed.
"""
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

lowercase = logging.get_logger(__name__)


@add_end_docstrings(A)
class lowercase__(A):
    """Pipeline scoring an image against free-form candidate labels."""

    def __init__(self, **snake_case) -> None:
        super().__init__(**snake_case)
        requires_backends(self, 'vision')
        # Restrict to zero-shot image-classification model heads for the
        # active framework (tf vs pt).
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    # NOTE(review): duplicate parameter name `snake_case` (positional +
    # **kwargs) — SyntaxError in the obfuscated source; kept byte-identical.
    def __call__(self, snake_case, **snake_case):
        # Delegates to Pipeline.__call__ (preprocess -> _forward -> postprocess).
        return super().__call__(snake_case, **snake_case)

    def lowerCamelCase_(self, **snake_case) -> tuple:
        """Split call kwargs into (preprocess_params, forward_params, postprocess_params)."""
        _UpperCAmelCase = {}
        if "candidate_labels" in kwargs:
            _UpperCAmelCase = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            _UpperCAmelCase = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    # NOTE(review): three parameters share the name `snake_case` —
    # SyntaxError in the obfuscated source; kept byte-identical. Intended
    # signature is (image, candidate_labels=None, hypothesis_template=...).
    def lowerCamelCase_(self, snake_case, snake_case=None, snake_case="This is a photo of {}."):
        """Preprocess: load the image, format one hypothesis per label, tokenize."""
        _UpperCAmelCase = load_image(snake_case)
        _UpperCAmelCase = self.image_processor(images=[image], return_tensors=self.framework)
        _UpperCAmelCase = candidate_labels
        _UpperCAmelCase = [hypothesis_template.format(snake_case) for x in candidate_labels]
        _UpperCAmelCase = self.tokenizer(snake_case, return_tensors=self.framework, padding=snake_case)
        _UpperCAmelCase = [text_inputs]
        return inputs

    def lowerCamelCase_(self, snake_case) -> dict:
        """Forward: run the model on image + text inputs, keep per-image logits."""
        _UpperCAmelCase = model_inputs.pop('candidate_labels')
        _UpperCAmelCase = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], snake_case):
            _UpperCAmelCase = text_inputs[0]
        else:
            # Batching case.
            _UpperCAmelCase = text_inputs[0][0]
        _UpperCAmelCase = self.model(**snake_case, **snake_case)
        _UpperCAmelCase = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def lowerCamelCase_(self, snake_case) -> list:
        """Postprocess: softmax the logits and return labels sorted by score."""
        _UpperCAmelCase = model_outputs.pop('candidate_labels')
        _UpperCAmelCase = model_outputs['logits'][0]
        if self.framework == "pt":
            _UpperCAmelCase = logits.softmax(dim=-1).squeeze(-1)
            _UpperCAmelCase = probs.tolist()
            if not isinstance(snake_case, snake_case):
                _UpperCAmelCase = [scores]
        elif self.framework == "tf":
            _UpperCAmelCase = stable_softmax(snake_case, axis=-1)
            _UpperCAmelCase = probs.numpy().tolist()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')

        # NOTE(review): the sort key's parameter is named `snake_case` but
        # the body reads `x` — NameError at runtime in the obfuscated source.
        _UpperCAmelCase = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(snake_case, snake_case), key=lambda snake_case: -x[0])
        ]
        return result
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging lowercase = logging.get_logger(__name__) lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED lowercase = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } lowercase = { '''allenai/led-base-16384''': 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _UpperCAmelCase = bs[:] _UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(A ) cs.append(2**8 + n ) n += 1 _UpperCAmelCase = [chr(A ) for n in cs] return dict(zip(A , A ) ) def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCAmelCase = char return pairs class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , snake_case , snake_case , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , **snake_case , ) -> int: _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( errors=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , **snake_case , ) with open(snake_case , encoding='utf-8' ) as vocab_handle: _UpperCAmelCase = json.load(snake_case ) _UpperCAmelCase = {v: k for k, v in self.encoder.items()} _UpperCAmelCase = errors # how to handle errors in decoding _UpperCAmelCase = bytes_to_unicode() _UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(snake_case , encoding='utf-8' ) as merges_handle: _UpperCAmelCase = merges_handle.read().split('\n' )[1:-1] _UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] _UpperCAmelCase = dict(zip(snake_case , range(len(snake_case ) ) ) ) _UpperCAmelCase = {} _UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _UpperCAmelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self ) -> Any: return len(self.encoder ) def lowerCamelCase_ ( self ) -> Optional[int]: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: if token in self.cache: return self.cache[token] _UpperCAmelCase = tuple(snake_case ) _UpperCAmelCase = get_pairs(snake_case ) if not pairs: return token while True: _UpperCAmelCase = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _UpperCAmelCase , _UpperCAmelCase = bigram _UpperCAmelCase = [] _UpperCAmelCase = 0 while i < len(snake_case ): 
try: _UpperCAmelCase = word.index(snake_case , snake_case ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _UpperCAmelCase = j if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _UpperCAmelCase = tuple(snake_case ) _UpperCAmelCase = new_word if len(snake_case ) == 1: break else: _UpperCAmelCase = get_pairs(snake_case ) _UpperCAmelCase = ' '.join(snake_case ) _UpperCAmelCase = word return word def lowerCamelCase_ ( self , snake_case ) -> int: _UpperCAmelCase = [] for token in re.findall(self.pat , snake_case ): _UpperCAmelCase = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case ).split(' ' ) ) return bpe_tokens def lowerCamelCase_ ( self , snake_case ) -> Tuple: return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self , snake_case ) -> Dict: return self.decoder.get(snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Optional[int]: _UpperCAmelCase = ''.join(snake_case ) _UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> Tuple[str]: if not os.path.isdir(snake_case ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join( snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(snake_case , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + '\n' ) _UpperCAmelCase = 
0 with open(snake_case , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!' ) _UpperCAmelCase = token_index writer.write(' '.join(snake_case ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] _UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) if token_ids_a is None: return [1] + ([0] * len(snake_case )) + [1] return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1] def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]: _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self , snake_case , snake_case=False , **snake_case ) -> List[Any]: _UpperCAmelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(snake_case ) > 0 and not text[0].isspace()): _UpperCAmelCase = ' ' + text return (text, kwargs) def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ) -> dict: _UpperCAmelCase = super()._pad( encoded_inputs=snake_case , 
max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , ) # Load from model defaults if return_attention_mask is None: _UpperCAmelCase = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _UpperCAmelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. _UpperCAmelCase = len(encoded_inputs['global_attention_mask'] ) != len(snake_case ) if needs_to_be_padded: _UpperCAmelCase = len(snake_case ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _UpperCAmelCase = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": _UpperCAmelCase = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''roberta''' def __init__( self , snake_case=50265 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=1 , snake_case=0 , snake_case=2 , snake_case="absolute" , snake_case=True , snake_case=None , **snake_case , ) -> Tuple: super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class lowercase__ ( A ): '''simple 
docstring''' @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCAmelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
24
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase__ ( A, A ): '''simple docstring''' _UpperCAmelCase = '''swin''' _UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]: super().__init__(**snake_case ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(snake_case ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase = int(embed_dim * 2 ** 
(len(snake_case ) - 1) ) _UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )] _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices( out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
1
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging lowercase = logging.get_logger(__name__) lowercase = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class lowercase__ ( A ): '''simple docstring''' @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ) -> bool: raise NotImplementedError('StoppingCriteria needs to be subclassed' ) class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case = None ) -> Optional[int]: _UpperCAmelCase = max_length _UpperCAmelCase = max_position_embeddings @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ) -> bool: _UpperCAmelCase = input_ids.shape[-1] _UpperCAmelCase = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( 'This is a friendly reminder - the current text generation call will exceed the model\'s predefined ' f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe ' 'exceptions, performance degradation, or nothing at all.' 
) return is_done class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case ) -> Tuple: warnings.warn( 'The class `MaxNewTokensCriteria` is deprecated. ' f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ' 'with `max_length = start_length + max_new_tokens` instead.' , snake_case , ) _UpperCAmelCase = start_length _UpperCAmelCase = max_new_tokens _UpperCAmelCase = start_length + max_new_tokens @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ) -> bool: return input_ids.shape[-1] >= self.max_length class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case = None ) -> int: _UpperCAmelCase = max_time _UpperCAmelCase = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ) -> bool: return time.time() - self.initial_timestamp > self.max_time class lowercase__ ( A ): '''simple docstring''' @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , **snake_case ) -> bool: return any(criteria(snake_case , snake_case ) for criteria in self ) @property def lowerCamelCase_ ( self ) -> Optional[int]: for stopping_criterium in self: if isinstance(snake_case , snake_case ): return stopping_criterium.max_length elif isinstance(snake_case , snake_case ): return stopping_criterium.max_length return None def UpperCAmelCase ( A : StoppingCriteriaList , A : int ): '''simple docstring''' _UpperCAmelCase = stopping_criteria.max_length _UpperCAmelCase = deepcopy(A ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , A ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=A ) ) return new_stopping_criteria
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = 42 @flax_register_to_config class lowercase__ ( nn.Module, A, A ): '''simple docstring''' _UpperCAmelCase = 32 _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _UpperCAmelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") _UpperCAmelCase = False _UpperCAmelCase = (3_20, 6_40, 12_80, 12_80) _UpperCAmelCase = 2 _UpperCAmelCase = 8 _UpperCAmelCase = None _UpperCAmelCase = 12_80 _UpperCAmelCase = 0.0 _UpperCAmelCase = False _UpperCAmelCase = jnp.floataa _UpperCAmelCase = True _UpperCAmelCase = 0 _UpperCAmelCase = False def lowerCamelCase_ ( self , snake_case ) -> FrozenDict: # init input tensors _UpperCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCAmelCase = jnp.zeros(snake_case , dtype=jnp.floataa ) _UpperCAmelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCAmelCase , _UpperCAmelCase = jax.random.split(snake_case ) _UpperCAmelCase = {'params': params_rng, 'dropout': dropout_rng} return self.init(snake_case , snake_case , snake_case , snake_case )["params"] def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = self.block_out_channels _UpperCAmelCase = block_out_channels[0] * 4 if 
self.num_attention_heads is not None: raise ValueError( 'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. _UpperCAmelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCAmelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCAmelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCAmelCase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype ) _UpperCAmelCase = self.only_cross_attention if isinstance(snake_case , snake_case ): _UpperCAmelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(snake_case , snake_case ): _UpperCAmelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCAmelCase = [] _UpperCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCAmelCase = output_channel _UpperCAmelCase = block_out_channels[i] _UpperCAmelCase = i == len(snake_case ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCAmelCase = FlaxCrossAttnDownBlockaD( in_channels=snake_case , 
out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCAmelCase = FlaxDownBlockaD( in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(snake_case ) _UpperCAmelCase = down_blocks # mid _UpperCAmelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCAmelCase = [] _UpperCAmelCase = list(reversed(snake_case ) ) _UpperCAmelCase = list(reversed(snake_case ) ) _UpperCAmelCase = list(reversed(snake_case ) ) _UpperCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCAmelCase = output_channel _UpperCAmelCase = reversed_block_out_channels[i] _UpperCAmelCase = reversed_block_out_channels[min(i + 1 , len(snake_case ) - 1 )] _UpperCAmelCase = i == len(snake_case ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCAmelCase = FlaxCrossAttnUpBlockaD( in_channels=snake_case , out_channels=snake_case , prev_output_channel=snake_case , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCAmelCase = FlaxUpBlockaD( in_channels=snake_case , out_channels=snake_case , 
prev_output_channel=snake_case , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(snake_case ) _UpperCAmelCase = output_channel _UpperCAmelCase = up_blocks # out _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case = True , snake_case = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: # 1. time if not isinstance(snake_case , jnp.ndarray ): _UpperCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCAmelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCAmelCase = jnp.expand_dims(snake_case , 0 ) _UpperCAmelCase = self.time_proj(snake_case ) _UpperCAmelCase = self.time_embedding(snake_case ) # 2. pre-process _UpperCAmelCase = jnp.transpose(snake_case , (0, 2, 3, 1) ) _UpperCAmelCase = self.conv_in(snake_case ) # 3. down _UpperCAmelCase = (sample,) for down_block in self.down_blocks: if isinstance(snake_case , snake_case ): _UpperCAmelCase , _UpperCAmelCase = down_block(snake_case , snake_case , snake_case , deterministic=not train ) else: _UpperCAmelCase , _UpperCAmelCase = down_block(snake_case , snake_case , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCAmelCase = () for down_block_res_sample, down_block_additional_residual in zip( snake_case , snake_case ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCAmelCase = new_down_block_res_samples # 4. 
mid _UpperCAmelCase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCAmelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCAmelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(snake_case , snake_case ): _UpperCAmelCase = up_block( snake_case , temb=snake_case , encoder_hidden_states=snake_case , res_hidden_states_tuple=snake_case , deterministic=not train , ) else: _UpperCAmelCase = up_block(snake_case , temb=snake_case , res_hidden_states_tuple=snake_case , deterministic=not train ) # 6. post-process _UpperCAmelCase = self.conv_norm_out(snake_case ) _UpperCAmelCase = nn.silu(snake_case ) _UpperCAmelCase = self.conv_out(snake_case ) _UpperCAmelCase = jnp.transpose(snake_case , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=snake_case )
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = 
layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase 
= ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( A : int = 3 ): '''simple docstring''' if isinstance(A , A ): raise TypeError('number of qubits must be a integer.' ) if number_of_qubits <= 0: raise ValueError('number of qubits must be > 0.' ) if math.floor(A ) != number_of_qubits: raise ValueError('number of qubits must be exact integer.' ) if number_of_qubits > 10: raise ValueError('number of qubits too large to simulate(>10).' ) _UpperCAmelCase = QuantumRegister(A , 'qr' ) _UpperCAmelCase = ClassicalRegister(A , 'cr' ) _UpperCAmelCase = QuantumCircuit(A , A ) _UpperCAmelCase = number_of_qubits for i in range(A ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(A ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , A , A ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(A , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(A , A ) # simulate with 10000 shots _UpperCAmelCase = Aer.get_backend('qasm_simulator' ) _UpperCAmelCase = execute(A , A , shots=1_0000 ) return job.result().get_counts(A ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
1
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowercase = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowercase = importlib.util.spec_from_file_location( '''transformers''', os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) lowercase = spec.loader.load_module() lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowercase = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') lowercase = { '''CLIPConfigMixin''', '''DecisionTransformerConfigMixin''', '''EncoderDecoderConfigMixin''', '''RagConfigMixin''', '''SpeechEncoderDecoderConfigMixin''', '''VisionEncoderDecoderConfigMixin''', '''VisionTextDualEncoderConfigMixin''', } def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): _UpperCAmelCase = False # source code of `config_class` _UpperCAmelCase = inspect.getsource(A ) _UpperCAmelCase = _re_checkpoint.findall(A ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _UpperCAmelCase , _UpperCAmelCase = checkpoint # verify the checkpoint name corresponds to the checkpoint link _UpperCAmelCase = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _UpperCAmelCase = True break _UpperCAmelCase = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(A ) if len(A ) > 0: _UpperCAmelCase = '\n'.join(sorted(A ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , 
batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french 
model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = CycleDiffusionPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} _UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) _UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase_ ( self ) -> int: torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _UpperCAmelCase = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) 
torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _UpperCAmelCase = CLIPTextModel(snake_case ) _UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _UpperCAmelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case ) _UpperCAmelCase = image / 2 + 0.5 if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'prompt': 'An astronaut riding an elephant', 'source_prompt': 'An astronaut riding a horse', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'eta': 0.1, 'strength': 0.8, 'guidance_scale': 3, 'source_guidance_scale': 1, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = CycleDiffusionPipeline(**snake_case ) _UpperCAmelCase = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ) _UpperCAmelCase = output.images _UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' 
) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = self.get_dummy_components() for name, module in components.items(): if hasattr(snake_case , 'half' ): _UpperCAmelCase = module.half() _UpperCAmelCase = CycleDiffusionPipeline(**snake_case ) _UpperCAmelCase = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ) _UpperCAmelCase = output.images _UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCamelCase_ ( self ) -> Any: return super().test_save_load_local() @unittest.skip('non-deterministic pipeline' ) def lowerCamelCase_ ( self ) -> str: return super().test_inference_batch_single_identical() @skip_mps def lowerCamelCase_ ( self ) -> Any: return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCamelCase_ ( self ) -> Tuple: return super().test_save_load_optional_components() @skip_mps def lowerCamelCase_ ( self ) -> List[Any]: return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/cycle-diffusion/black_colored_car.png' ) _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' ) _UpperCAmelCase = init_image.resize((512, 512) ) _UpperCAmelCase = 'CompVis/stable-diffusion-v1-4' _UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case , 
subfolder='scheduler' ) _UpperCAmelCase = CycleDiffusionPipeline.from_pretrained( snake_case , scheduler=snake_case , safety_checker=snake_case , torch_dtype=torch.floataa , revision='fp16' ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() _UpperCAmelCase = 'A black colored car' _UpperCAmelCase = 'A blue colored car' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=snake_case , source_prompt=snake_case , image=snake_case , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case , output_type='np' , ) _UpperCAmelCase = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/cycle-diffusion/black_colored_car.png' ) _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' ) _UpperCAmelCase = init_image.resize((512, 512) ) _UpperCAmelCase = 'CompVis/stable-diffusion-v1-4' _UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case , subfolder='scheduler' ) _UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(snake_case , scheduler=snake_case , safety_checker=snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() _UpperCAmelCase = 'A black colored car' _UpperCAmelCase = 'A blue colored car' _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=snake_case , source_prompt=snake_case , image=snake_case , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case , output_type='np' , ) _UpperCAmelCase = output.images assert np.abs(image - expected_image ).max() < 2E-2
24
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = 
self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) 
_UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
24
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # 
auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
24
1
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 
20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) _UpperCAmelCase = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above _UpperCAmelCase = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above _UpperCAmelCase = tf_top_k_top_p_filtering(snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) _UpperCAmelCase = output[output != -float('inf' )] _UpperCAmelCase = tf.cast( tf.where(tf.not_equal(snake_case , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(snake_case , snake_case , rtol=1E-12 ) tf.debugging.assert_equal(snake_case , snake_case ) @require_tf class lowercase__ ( unittest.TestCase, A ): '''simple docstring''' if is_tf_available(): _UpperCAmelCase = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowerCamelCase_ ( self ) -> List[str]: # TF-only test: tf.saved_model export _UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCAmelCase = 2 _UpperCAmelCase = 2 class lowercase__ ( tf.Module ): '''simple docstring''' def __init__( self , snake_case ) -> Optional[Any]: super(snake_case , self ).__init__() _UpperCAmelCase = model 
@tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ), ) , jit_compile=snake_case , ) def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = self.model.generate( input_ids=snake_case , attention_mask=snake_case , max_new_tokens=snake_case , return_dict_in_generate=snake_case , ) return {"sequences": outputs["sequences"]} _UpperCAmelCase = [[2, 0], [102, 103]] _UpperCAmelCase = [[1, 0], [1, 1]] _UpperCAmelCase = DummyModel(model=snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(snake_case , snake_case , signatures={'serving_default': dummy_model.serving} ) _UpperCAmelCase = tf.saved_model.load(snake_case ).signatures['serving_default'] for batch_size in range(1 , len(snake_case ) + 1 ): _UpperCAmelCase = { 'input_ids': tf.constant(dummy_input_ids[:batch_size] ), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ), } _UpperCAmelCase = serving_func(**snake_case )['sequences'] _UpperCAmelCase = test_model.generate(**snake_case , max_new_tokens=snake_case ) tf.debugging.assert_equal(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> int: # TF-only test: tf.saved_model export _UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCAmelCase = 1 _UpperCAmelCase = 2 class lowercase__ ( tf.Module ): '''simple docstring''' def __init__( self , snake_case ) -> str: super(snake_case , self ).__init__() _UpperCAmelCase = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ), ) , jit_compile=snake_case , ) def lowerCamelCase_ ( self , snake_case , snake_case ) -> int: _UpperCAmelCase = self.model.generate( input_ids=snake_case , attention_mask=snake_case , max_new_tokens=snake_case , 
return_dict_in_generate=snake_case , ) return {"sequences": outputs["sequences"]} _UpperCAmelCase = [[2], [102, 103]] _UpperCAmelCase = [[1], [1, 1]] _UpperCAmelCase = DummyModel(model=snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(snake_case , snake_case , signatures={'serving_default': dummy_model.serving} ) _UpperCAmelCase = tf.saved_model.load(snake_case ).signatures['serving_default'] for input_row in range(len(snake_case ) ): _UpperCAmelCase = { 'input_ids': tf.constant([dummy_input_ids[input_row]] ), 'attention_mask': tf.constant([dummy_attention_masks[input_row]] ), } _UpperCAmelCase = serving_func(**snake_case )['sequences'] _UpperCAmelCase = test_model.generate(**snake_case , max_new_tokens=snake_case ) tf.debugging.assert_equal(snake_case , snake_case ) @slow @require_tensorflow_text def lowerCamelCase_ ( self ) -> Any: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=snake_case ) class lowercase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ) -> Dict: super().__init__() _UpperCAmelCase = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(snake_case , 'spiece.model' ) , 'rb' ).read() ) _UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' ) def lowerCamelCase_ ( self , snake_case , *snake_case , **snake_case ) -> str: _UpperCAmelCase = self.tokenizer.tokenize(snake_case ) _UpperCAmelCase , _UpperCAmelCase = text.pad_model_inputs( snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) _UpperCAmelCase = self.model.generate(input_ids=snake_case , attention_mask=snake_case ) return self.tokenizer.detokenize(snake_case ) _UpperCAmelCase = CompleteSentenceTransformer() _UpperCAmelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' ) _UpperCAmelCase 
= complete_model(snake_case ) _UpperCAmelCase = tf.keras.Model(snake_case , snake_case ) keras_model.save(snake_case ) def lowerCamelCase_ ( self ) -> List[Any]: # Has PT equivalent: this test relies on random sampling _UpperCAmelCase = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } _UpperCAmelCase = 14 _UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCAmelCase = 'Hello, my dog is cute and' _UpperCAmelCase = tokenizer(snake_case , return_tensors='tf' ) _UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) _UpperCAmelCase = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) _UpperCAmelCase = model.generate(**snake_case , eos_token_id=snake_case , **snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) _UpperCAmelCase = [638, 198] with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) _UpperCAmelCase = model.generate(**snake_case , eos_token_id=snake_case , **snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowerCamelCase_ ( self ) -> Dict: # Has PT equivalent: ample use of framework-specific code _UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' ) _UpperCAmelCase = 'Hugging Face is a technology company based in New York and Paris.' 
_UpperCAmelCase = bart_tokenizer(snake_case , return_tensors='tf' ).input_ids _UpperCAmelCase = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' ) _UpperCAmelCase = bart_model.generate(snake_case ).numpy() class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case , snake_case=None , **snake_case ) -> str: return super().call(snake_case , **snake_case ) _UpperCAmelCase = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' ) _UpperCAmelCase = bart_model.generate(snake_case , foo='bar' ).numpy() self.assertTrue(np.array_equal(snake_case , snake_case ) ) class lowercase__ ( bart_model.model.encoder.__class__ ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case , **snake_case ) -> int: return super().call(snake_case , **snake_case ) _UpperCAmelCase = FakeEncoder(bart_model.config , bart_model.model.shared ) _UpperCAmelCase = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) _UpperCAmelCase = bart_model.generate(snake_case ).numpy() with self.assertRaises(snake_case ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(snake_case , foo='bar' )
24
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
24
1
"""simple docstring""" import argparse from collections import defaultdict import yaml lowercase = '''docs/source/en/_toctree.yml''' def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = defaultdict(A ) _UpperCAmelCase = [] _UpperCAmelCase = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(A ) _UpperCAmelCase = new_doc_list _UpperCAmelCase = [key for key, value in counts.items() if value > 1] _UpperCAmelCase = [] for duplicate_key in duplicates: _UpperCAmelCase = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(A ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) _UpperCAmelCase = sorted(A , key=lambda A : s["title"].lower() ) # "overview" gets special treatment and is always first if len(A ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' 
) overview_doc.extend(A ) # Sort return overview_doc def UpperCAmelCase ( A : int=False ): '''simple docstring''' with open(A , encoding='utf-8' ) as f: _UpperCAmelCase = yaml.safe_load(f.read() ) # Get to the API doc _UpperCAmelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 _UpperCAmelCase = content[api_idx]['sections'] # Then to the model doc _UpperCAmelCase = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _UpperCAmelCase = api_doc[scheduler_idx]['sections'] _UpperCAmelCase = clean_doc_toc(A ) _UpperCAmelCase = False if new_scheduler_doc != scheduler_doc: _UpperCAmelCase = True if overwrite: _UpperCAmelCase = new_scheduler_doc if diff: if overwrite: _UpperCAmelCase = api_doc with open(A , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(A , allow_unicode=A ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) def UpperCAmelCase ( A : str=False ): '''simple docstring''' with open(A , encoding='utf-8' ) as f: _UpperCAmelCase = yaml.safe_load(f.read() ) # Get to the API doc _UpperCAmelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 _UpperCAmelCase = content[api_idx]['sections'] # Then to the model doc _UpperCAmelCase = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _UpperCAmelCase = False _UpperCAmelCase = api_doc[pipeline_idx]['sections'] _UpperCAmelCase = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _UpperCAmelCase = pipeline_doc['section'] _UpperCAmelCase = clean_doc_toc(A ) if overwrite: _UpperCAmelCase = new_sub_pipeline_doc new_pipeline_docs.append(A ) # sort overall pipeline doc _UpperCAmelCase = clean_doc_toc(A ) if new_pipeline_docs != pipeline_docs: _UpperCAmelCase = True if overwrite: _UpperCAmelCase = new_pipeline_docs if diff: if overwrite: _UpperCAmelCase = api_doc with open(A , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(A , 
allow_unicode=A ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowercase = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
24
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, 
MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
24
1
"""simple docstring""" def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' while b: _UpperCAmelCase , _UpperCAmelCase = b, a % b return a def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(A , a % b ) def UpperCAmelCase ( ): '''simple docstring''' print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' ) print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' ) print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' ) print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' ) print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' ) print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' ) print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' ) print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' ) print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' ) print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' ) if __name__ == "__main__": main()
24
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
24
1
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging lowercase = '''\ ''' lowercase = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' lowercase = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... 
input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case = 16 , snake_case = True , snake_case=None ) -> Optional[int]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _UpperCAmelCase = 'cuda' else: _UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(snake_case ) _UpperCAmelCase = model.to(snake_case ) _UpperCAmelCase = AutoTokenizer.from_pretrained(snake_case ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(snake_case ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , return_tensors='pt' , return_attention_mask=snake_case , ).to(snake_case ) _UpperCAmelCase = encodings['input_ids'] _UpperCAmelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. 
Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(snake_case ) , snake_case ) ): _UpperCAmelCase = min(start_index + batch_size , len(snake_case ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(snake_case ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(snake_case ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(snake_case , attention_mask=snake_case ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , snake_case ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(snake_case )}
24
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , 
framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
24
1
"""simple docstring""" from sklearn.metrics import fa_score import datasets lowercase = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' lowercase = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. 
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. 
])} ''' lowercase = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): '''simple docstring''' def lowerCamelCase_ ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=1 , snake_case="binary" , snake_case=None ) -> Optional[Any]: _UpperCAmelCase = fa_score( snake_case , snake_case , labels=snake_case , pos_label=snake_case , average=snake_case , sample_weight=snake_case ) return {"f1": float(snake_case ) if score.size == 1 else score}
24
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
24
1
"""simple docstring""" import requests from bsa import BeautifulSoup def UpperCAmelCase ( A : str , A : dict ): '''simple docstring''' _UpperCAmelCase = BeautifulSoup(requests.get(A , params=A ).content , 'html.parser' ) _UpperCAmelCase = soup.find('div' , attrs={'class': 'gs_ri'} ) _UpperCAmelCase = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' ) return anchors[2].get_text() if __name__ == "__main__": lowercase = { '''title''': ( '''Precisely geometry controlled microsupercapacitors for ultrahigh areal ''' '''capacitance, volumetric capacitance, and energy density''' ), '''journal''': '''Chem. Mater.''', '''volume''': 30, '''pages''': '''3979-3990''', '''year''': 20_18, '''hl''': '''en''', } print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
24
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowercase = transforms.Compose( [ transforms.Resize((2_56, 2_56)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def UpperCAmelCase ( A : Optional[Any] ): '''simple docstring''' if isinstance(A , torch.Tensor ): return image elif isinstance(A , PIL.Image.Image ): _UpperCAmelCase = [image] _UpperCAmelCase = [trans(img.convert('RGB' ) ) for img in image] _UpperCAmelCase = torch.stack(A ) return image class lowercase__ ( A ): '''simple docstring''' def __init__( self , snake_case , snake_case ) -> List[Any]: super().__init__() # make sure scheduler can always be converted to DDIM _UpperCAmelCase = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=snake_case , scheduler=snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]: if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}' ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> str: # get the original timestep using init_timestep _UpperCAmelCase = min(int(num_inference_steps * strength ) , snake_case ) _UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) _UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None ) -> List[str]: if not isinstance(snake_case , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case )}' ) _UpperCAmelCase = image.to(device=snake_case , dtype=snake_case ) if isinstance(snake_case , snake_case ) and 
len(snake_case ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(snake_case )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) _UpperCAmelCase = init_latents.shape _UpperCAmelCase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case ) # get latents print('add noise to latents at timestep' , snake_case ) _UpperCAmelCase = self.scheduler.add_noise(snake_case , snake_case , snake_case ) _UpperCAmelCase = init_latents return latents @torch.no_grad() def __call__( self , snake_case = None , snake_case = 0.8 , snake_case = 1 , snake_case = None , snake_case = 0.0 , snake_case = 50 , snake_case = None , snake_case = "pil" , snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]: self.check_inputs(snake_case ) # 2. Preprocess image _UpperCAmelCase = preprocess(snake_case ) # 3. set timesteps self.scheduler.set_timesteps(snake_case , device=self.device ) _UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(snake_case , snake_case , self.device ) _UpperCAmelCase = timesteps[:1].repeat(snake_case ) # 4. Prepare latent variables _UpperCAmelCase = self.prepare_latents(snake_case , snake_case , snake_case , self.unet.dtype , self.device , snake_case ) _UpperCAmelCase = latents # 5. Denoising loop for t in self.progress_bar(snake_case ): # 1. predict noise model_output _UpperCAmelCase = self.unet(snake_case , snake_case ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step( snake_case , snake_case , snake_case , eta=snake_case , use_clipped_model_output=snake_case , generator=snake_case , ).prev_sample _UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(snake_case ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=snake_case )
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = 
np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
24
1
"""simple docstring""" def UpperCAmelCase ( A : list[int] , A : int ): '''simple docstring''' _UpperCAmelCase = len(A ) _UpperCAmelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): _UpperCAmelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): _UpperCAmelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: _UpperCAmelCase = subset[i - 1][j] if arr[i - 1] <= j: _UpperCAmelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, 
RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = '''▁''' lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowercase = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), } } lowercase = { '''facebook/mbart-large-en-ro''': 10_24, '''facebook/mbart-large-cc25''': 10_24, } # fmt: off lowercase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase = [] _UpperCAmelCase = [] def __init__( self , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=None , snake_case=None , snake_case=None , snake_case = None , snake_case=None , **snake_case , ) -> Tuple: # Mask token behave like a normal word, i.e. 
include the space before it _UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , tokenizer_file=snake_case , src_lang=snake_case , tgt_lang=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(snake_case ) ) _UpperCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase = 1 _UpperCAmelCase = len(self.sp_model ) _UpperCAmelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case ) } _UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()} _UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _UpperCAmelCase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) _UpperCAmelCase = src_lang if src_lang is not None else 'en_XX' _UpperCAmelCase = self.lang_code_to_id[self._src_lang] _UpperCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Optional[Any]: _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None _UpperCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , snake_case ) -> str: _UpperCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowerCamelCase_ ( self ) -> List[Any]: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowerCamelCase_ ( self ) -> str: return self._src_lang @src_lang.setter def lowerCamelCase_ ( self , snake_case ) -> None: _UpperCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) _UpperCAmelCase = [1] * len(self.prefix_tokens ) _UpperCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(snake_case )) + suffix_ones return prefix_ones + ([0] * len(snake_case )) + ([0] * len(snake_case )) + suffix_ones def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + 
self.suffix_tokens def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]: _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , **snake_case ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _UpperCAmelCase = src_lang _UpperCAmelCase = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case ) _UpperCAmelCase = self.convert_tokens_to_ids(snake_case ) _UpperCAmelCase = tgt_lang_id return inputs def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self , snake_case ) -> List[str]: return self.sp_model.encode(snake_case , out_type=snake_case ) def lowerCamelCase_ ( self , snake_case ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase = self.sp_model.PieceToId(snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase_ ( self , snake_case ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]: _UpperCAmelCase = ''.join(snake_case ).replace(snake_case , ' ' ).strip() return out_string def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> Tuple[str]: if not os.path.isdir(snake_case ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( snake_case , 
(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , 'wb' ) as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,) def lowerCamelCase_ ( self , snake_case , snake_case = "en_XX" , snake_case = None , snake_case = "ro_RO" , **snake_case , ) -> BatchEncoding: _UpperCAmelCase = src_lang _UpperCAmelCase = tgt_lang return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case ) def lowerCamelCase_ ( self ) -> str: return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase_ ( self ) -> str: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase_ ( self , snake_case ) -> None: _UpperCAmelCase = self.lang_code_to_id[src_lang] _UpperCAmelCase = [] _UpperCAmelCase = [self.eos_token_id, self.cur_lang_code] def lowerCamelCase_ ( self , snake_case ) -> None: _UpperCAmelCase = self.lang_code_to_id[lang] _UpperCAmelCase = [] _UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
24
"""Deprecated feature-extractor alias for the YOLOS image processor."""

import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor

lowercase = logging.get_logger(__name__)


class lowercase__(YolosImageProcessor):
    """Deprecated shim kept so old imports keep working.

    Subclasses :class:`YolosImageProcessor` (previously the base was an
    undefined name ``A``) and forwards all construction arguments to it.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The second positional argument of warnings.warn must be a Warning
        # subclass; previously the args tuple was passed there, which raises
        # TypeError the moment the warning is issued.  The two *snake_case
        # parameters also collided (duplicate argument name -> SyntaxError).
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
24
1
"""Lazy import structure for the Whisper model family."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public names it provides.  _LazyModule consumes this
# dict at the bottom of the file; previously every optional section rebound
# a single `lowercase` variable (clobbering the dict) and the final call
# referenced `_import_structure`, which was never defined (NameError).
_import_structure = {
    'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
    'feature_extraction_whisper': ['WhisperFeatureExtractor'],
    'processing_whisper': ['WhisperProcessor'],
    'tokenization_whisper': ['WhisperTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_whisper'] = [
        'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WhisperForConditionalGeneration',
        'WhisperModel',
        'WhisperPreTrainedModel',
        'WhisperForAudioClassification',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_whisper'] = [
        'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFWhisperForConditionalGeneration',
        'TFWhisperModel',
        'TFWhisperPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_whisper'] = [
        'FlaxWhisperForConditionalGeneration',
        'FlaxWhisperModel',
        'FlaxWhisperPreTrainedModel',
        'FlaxWhisperForAudioClassification',
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy backends load on
    # demand.  The sys.modules assignment is what makes the laziness take
    # effect; `sys` was imported but never used in the broken version.
    lowercase = sys.modules[__name__] = _LazyModule(
        __name__, globals()['__file__'], _import_structure, module_spec=__spec__
    )
24
"""BEiT model configuration and its ONNX export configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Pretrained-config archive map (kept under the module's last surviving
# binding name; previously this rebinding silently clobbered the logger).
lowercase = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class lowercase__(PretrainedConfig):
    """Configuration for a BEiT model.

    Defaults reproduce microsoft/beit-base-patch16-224-pt22k.  The base
    class was the undefined name ``A``; every ``__init__`` parameter shared
    the name ``snake_case`` (a SyntaxError), and each value was assigned to
    a throw-away local instead of ``self`` — the config object ended up with
    no attributes.  All restored below.
    """

    # PretrainedConfig machinery keys on this identifier.
    model_type = 'beit'

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class lowercase__(OnnxConfig):
    """ONNX export configuration for BEiT.

    NOTE(review): this rebinds the same class name as the model config above
    (originally BeitConfig / BeitOnnxConfig), making the model config
    unreachable at module level — confirm the intended public names.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
24
1
"""Convert an integer-valued decimal number to a hexadecimal string.

Mirrors the behaviour of the built-in ``hex()`` for integer values, e.g.
``UpperCAmelCase(26) == '0x1a'`` and ``UpperCAmelCase(-26) == '-0x1a'``.
"""

# Digit value -> hexadecimal character.
lowercase = {
    0: '0',
    1: '1',
    2: '2',
    3: '3',
    4: '4',
    5: '5',
    6: '6',
    7: '7',
    8: '8',
    9: '9',
    10: 'a',
    11: 'b',
    12: 'c',
    13: 'd',
    14: 'e',
    15: 'f',
}


def UpperCAmelCase(A: float) -> str:
    """Return the hexadecimal string for the integer value of ``A``.

    ``A`` must be an int, or a float carrying an exact integer value.  The
    check stays an ``assert`` so callers that caught AssertionError keep
    working.  (The previous body referenced the undefined names ``decimal``
    and ``values`` and bound both divmod results to one variable.)
    """
    assert type(A) in (int, float) and A == int(A)
    decimal = int(A)
    negative = decimal < 0
    if negative:
        decimal *= -1
    hexadecimal = ''
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        # Prepend: remainders come out least-significant digit first.
        hexadecimal = lowercase[remainder] + hexadecimal
    # Zero never enters the loop; emit '0x0' like hex(0), not a bare '0x'.
    hexadecimal = '0x' + (hexadecimal or '0')
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
24
"""Count token occurrences in a binarized MLM dataset (cf. XLM/word2vec)."""

import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Module logger (previously the script logged through the undefined name
# `logger` while the actual logger was bound to `lowercase`).
lowercase = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    # parse_args result must keep its own name: everything below reads it.
    args = parser.parse_args()

    lowercase.info(f'Loading data from {args.data_file}')
    # SECURITY: pickle.load executes arbitrary code — only point --data_file
    # at trusted dumps produced by the companion binarization script.
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    lowercase.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by token id.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    lowercase.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
1
"""Fast and slow tests for the DiT class-conditional image pipeline."""

import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class lowercase__(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for DiTPipeline with tiny dummy components.

    The mixin attributes below were previously all bound to one name
    (`_UpperCAmelCase`) so only the last survived, and `self.pipeline_class`
    / `self.get_dummy_components` were undefined.
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original name of this boolean flag was lost in
    # mangling; `test_cpu_offload` is the conventional mixin switch — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny transformer/vae/scheduler components for fast tests."""
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            # NOTE(review): these two flags were mangled to an undefined
            # name; True/False restored from the upstream test — confirm.
            attention_bias=True,
            activation_fn='gelu-approximate',
            num_embeds_ada_norm=1000,
            norm_type='ada_norm_zero',
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        return {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; MPS needs a CPU-side generator."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class lowercase__(unittest.TestCase):
    """Slow GPU tests against reference outputs of the released checkpoints."""

    def tearDown(self):
        # Release VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')
        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy'
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        # Swap in the DPM-Solver scheduler (previously the new scheduler was
        # assigned to a throw-away local and never attached to the pipe).
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')
        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f'/dit/{word}_512.npy'
            )
            assert np.abs((expected_image - image).max()) < 1e-1
24
"""Project Euler problem 43: sub-string divisibility of pandigital numbers."""

from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the 10-digit tuple has the sub-string divisibility property.

    For digits d1..d10, requires d2d3d4 % 2 == 0, d3d4d5 % 3 == 0,
    d4d5d6 % 5 == 0 and the following windows divisible by 7, 11, 13, 17.
    """
    # d4 even <=> d2d3d4 divisible by 2
    if num[3] % 2 != 0:
        return False
    # digit-sum test for divisibility by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d6 must be 0 or 5 <=> d4d5d6 divisible by 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0..n-1 pandigital numbers with the sub-string property.

    Previously this function called the undefined name
    ``is_substring_divisible`` and mapped the integer argument (instead of
    ``str``) over the digits — both restored here.
    """
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase = solution

if __name__ == "__main__":
    print(f'{solution() = }')
24
1
"""Extractive question-answering task template."""

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


# `frozen` must be a bool; previously the undefined name `A` was passed.
@dataclass(frozen=True)
class lowercase__(TaskTemplate):
    """Task template mapping dataset columns onto the extractive-QA schema.

    All six class attributes were previously bound to one placeholder name,
    so the dataclass had no fields and `column_mapping` read attributes that
    did not exist.
    """

    # Template identifier (a real dataclass field so it serializes).
    task: str = field(default='question-answering-extractive', metadata={'include_in_asdict_even_if_is_default': True})
    # Schemas are ClassVars: shared, not per-instance dataclass fields.
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map configured column names to the canonical schema names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
24
"""Lazy import structure for the MVP model."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Submodule name -> public names.  _LazyModule reads this dict below;
# previously each section rebound one `lowercase` variable and the final
# call referenced the undefined `_import_structure` (NameError on import).
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in sys.modules so laziness actually applies.
    lowercase = sys.modules[__name__] = _LazyModule(
        __name__, globals()['__file__'], _import_structure, module_spec=__spec__
    )
24
1
"""Fast and slow tests for the Kandinsky image-to-image pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class lowercase__(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks with tiny dummy components.

    The mixin attributes, properties and methods below were all collapsed to
    single placeholder names in the broken version, leaving
    `self.pipeline_class`, `self.get_dummy_components`, etc. undefined.
    """

    pipeline_class = KandinskyImgaImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    # NOTE(review): original flag name lost in mangling; this is the
    # conventional mixin switch disabled for Kandinsky — confirm.
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        return XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        return text_encoder.eval()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Assemble the full set of tiny pipeline components."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        return {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic embeddings, init image and generator for one call."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }

    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'


@slow
@require_torch_gpu
class lowercase__(unittest.TestCase):
    """Slow GPU test against a reference output of the released checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy'
        )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/cat.png'
        )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
24
"""Lazy import structure for the CLIPSeg model."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names.  _LazyModule reads this dict below;
# previously the torch section rebound `lowercase` and the final call
# referenced the undefined `_import_structure` (NameError on import).
_import_structure = {
    'configuration_clipseg': [
        'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPSegConfig',
        'CLIPSegTextConfig',
        'CLIPSegVisionConfig',
    ],
    'processing_clipseg': ['CLIPSegProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_clipseg'] = [
        'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPSegModel',
        'CLIPSegPreTrainedModel',
        'CLIPSegTextModel',
        'CLIPSegVisionModel',
        'CLIPSegForImageSegmentation',
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    # Install the lazy proxy in sys.modules so laziness actually applies.
    lowercase = sys.modules[__name__] = _LazyModule(
        __name__, globals()['__file__'], _import_structure, module_spec=__spec__
    )
24
1
"""Lazy import structure for the LLaMA model."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public names.  _LazyModule reads this dict below;
# previously every optional section rebound `lowercase` and the final call
# referenced the undefined `_import_structure` (NameError on import).
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Install the lazy proxy in sys.modules so laziness actually applies.
    lowercase = sys.modules[__name__] = _LazyModule(
        __name__, globals()['__file__'], _import_structure, module_spec=__spec__
    )
24
"""Swin Transformer model configuration and its ONNX export configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

# Pretrained-config archive map (kept under the module's last surviving
# binding name; previously this rebinding silently clobbered the logger).
lowercase = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class lowercase__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin model.

    Defaults reproduce microsoft/swin-tiny-patch4-window7-224.  The two base
    classes were the undefined name ``A``; every ``__init__`` parameter
    shared one name (SyntaxError) and values were bound to a throw-away
    local instead of ``self`` — all restored below.
    """

    # PretrainedConfig machinery keys on these two class attributes
    # (previously both were bound to one placeholder name, losing the first).
    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class lowercase__(OnnxConfig):
    """ONNX export configuration for Swin.

    NOTE(review): this rebinds the same class name as the model config above
    (originally SwinConfig / SwinOnnxConfig), making the model config
    unreachable at module level — confirm the intended public names.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
24
1
"""simple docstring""" def UpperCAmelCase ( A : list[int] ): '''simple docstring''' _UpperCAmelCase = [] if len(A ) == 1: return [nums.copy()] for _ in range(len(A ) ): _UpperCAmelCase = nums.pop(0 ) _UpperCAmelCase = permute(A ) for perm in permutations: perm.append(A ) result.extend(A ) nums.append(A ) return result def UpperCAmelCase ( A : List[str] ): '''simple docstring''' def backtrack(A : Any ): if start == len(A ) - 1: output.append(nums[:] ) else: for i in range(A , len(A ) ): _UpperCAmelCase , _UpperCAmelCase = nums[i], nums[start] backtrack(start + 1 ) _UpperCAmelCase , _UpperCAmelCase = nums[i], nums[start] # backtrack _UpperCAmelCase = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function lowercase = permutea([1, 2, 3]) print(res) doctest.testmod()
24
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowercase__ ( nn.Module ): '''simple docstring''' def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str: super().__init__() _UpperCAmelCase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _UpperCAmelCase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _UpperCAmelCase = [77, 257] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _UpperCAmelCase = [1, 0] def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any: _UpperCAmelCase = hidden_states _UpperCAmelCase = [] _UpperCAmelCase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _UpperCAmelCase = self.transformer_index_for_condition[i] _UpperCAmelCase = self.transformers[transformer_index]( snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _UpperCAmelCase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=snake_case )
24
1
"""simple docstring""" from collections import deque def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = len(A ) _UpperCAmelCase = deque() _UpperCAmelCase = [False for _ in range(A )] _UpperCAmelCase = [-1 for _ in range(A )] _UpperCAmelCase = index_of[:] def strong_connect(A : List[Any] , A : List[str] , A : Optional[int] ): _UpperCAmelCase = index # the number when this node is seen _UpperCAmelCase = index # lowest rank node reachable from here index += 1 stack.append(A ) _UpperCAmelCase = True for w in g[v]: if index_of[w] == -1: _UpperCAmelCase = strong_connect(A , A , A ) _UpperCAmelCase = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: _UpperCAmelCase = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: _UpperCAmelCase = [] _UpperCAmelCase = stack.pop() _UpperCAmelCase = False component.append(A ) while w != v: _UpperCAmelCase = stack.pop() _UpperCAmelCase = False component.append(A ) components.append(A ) return index _UpperCAmelCase = [] for v in range(A ): if index_of[v] == -1: strong_connect(A , 0 , A ) return components def UpperCAmelCase ( A : List[Any] , A : Optional[Any] ): '''simple docstring''' _UpperCAmelCase = [[] for _ in range(A )] for u, v in edges: g[u].append(A ) return g if __name__ == "__main__": # Test lowercase = 7 lowercase = [0, 0, 1, 2, 3, 3, 4, 4, 6] lowercase = [1, 3, 2, 0, 1, 4, 5, 6, 5] lowercase = [(u, v) for u, v in zip(source, target)] lowercase = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
24
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = 
layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase 
= ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''wavlm''' def __init__( self , snake_case=32 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.0 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=1E-5 , snake_case="group" , snake_case="gelu" , snake_case=(512, 512, 512, 512, 512, 512, 512) , snake_case=(5, 2, 2, 2, 2, 2, 2) , snake_case=(10, 3, 3, 3, 3, 2, 2) , snake_case=False , snake_case=128 , snake_case=16 , snake_case=320 , snake_case=800 , snake_case=False , snake_case=True , snake_case=0.05 , snake_case=10 , snake_case=2 , snake_case=0.0 , snake_case=10 , snake_case=320 , snake_case=2 , snake_case=0.1 , snake_case=100 , snake_case=256 , snake_case=256 , snake_case=0.1 , snake_case="mean" , snake_case=False , snake_case=False , snake_case=256 , snake_case=(512, 512, 512, 512, 1500) , snake_case=(5, 3, 3, 1, 1) , snake_case=(1, 2, 3, 1, 1) , snake_case=512 , snake_case=80 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=False , snake_case=3 , snake_case=2 , snake_case=3 , snake_case=None , **snake_case , ) -> List[str]: super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_norm _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(snake_case ) _UpperCAmelCase = list(snake_case ) _UpperCAmelCase = list(snake_case ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_buckets _UpperCAmelCase = max_bucket_distance _UpperCAmelCase = 
num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layerdrop _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = num_ctc_classes _UpperCAmelCase = vocab_size _UpperCAmelCase = do_stable_layer_norm _UpperCAmelCase = use_weighted_layer_sum _UpperCAmelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = apply_spec_augment _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length # parameters for pretraining with codevector quantized representations _UpperCAmelCase = num_codevectors_per_group _UpperCAmelCase = num_codevector_groups _UpperCAmelCase = contrastive_logits_temperature _UpperCAmelCase = num_negatives _UpperCAmelCase = codevector_dim _UpperCAmelCase = proj_codevector_dim _UpperCAmelCase = diversity_loss_weight # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # adapter _UpperCAmelCase = add_adapter _UpperCAmelCase = adapter_kernel_size _UpperCAmelCase = adapter_stride _UpperCAmelCase = num_adapter_layers _UpperCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCAmelCase = list(snake_case ) _UpperCAmelCase = list(snake_case ) _UpperCAmelCase = list(snake_case ) _UpperCAmelCase = xvector_output_dim @property def lowerCamelCase_ ( self ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
24
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def UpperCAmelCase ( A : list[list[float]] ): '''simple docstring''' _UpperCAmelCase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(A ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix _UpperCAmelCase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements _UpperCAmelCase = [[0.0, 0.0], [0.0, 0.0]] _UpperCAmelCase , _UpperCAmelCase = matrix[1][1], matrix[0][0] _UpperCAmelCase , _UpperCAmelCase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(A ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(A ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule _UpperCAmelCase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' 
) # Creating cofactor matrix _UpperCAmelCase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] _UpperCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) _UpperCAmelCase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) _UpperCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) _UpperCAmelCase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) _UpperCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) _UpperCAmelCase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) _UpperCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) _UpperCAmelCase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) _UpperCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) _UpperCAmelCase = array(A ) for i in range(3 ): for j in range(3 ): _UpperCAmelCase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix _UpperCAmelCase = array(A ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(A ) # Calculate the inverse of the matrix return [[float(d(A ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
24
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , 
batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french 
model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
1
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowercase = logging.getLogger(__name__) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=A , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=A , default=1000 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=A , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=A , type=A , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=A , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=A , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' 
, ) _UpperCAmelCase = parser.parse_args() return args def UpperCAmelCase ( A : Tuple ): '''simple docstring''' def fn(A : int ): return tokenizer(examples['text'] ) return fn def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [] for i in range(len(tokenized_data['input_ids'] ) ): _UpperCAmelCase = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _UpperCAmelCase = tf.train.Features(feature=A ) _UpperCAmelCase = tf.train.Example(features=A ) _UpperCAmelCase = example.SerializeToString() records.append(A ) return records def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _UpperCAmelCase = min(len(A ) , args.limit ) _UpperCAmelCase = dataset.select(range(A ) ) print(f'Limiting the dataset to {args.limit} entries.' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(A ): os.makedirs(A ) else: _UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. _UpperCAmelCase = tokenize_function(A ) _UpperCAmelCase = dataset.map(A , batched=A , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. 
When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A : Any ): # Concatenate all texts. _UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} _UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , A , args.max_length )] for k, t in concatenated_examples.items() } return result _UpperCAmelCase = dataset_tokenized.map(A , batched=A , batch_size=1000 , num_proc=4 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 for shard in range(0 , len(A ) , args.shard_size ): _UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] _UpperCAmelCase = len(dataset_snapshot['input_ids'] ) _UpperCAmelCase = os.path.join(A , f'dataset-{shard_count}-{records_containing}.tfrecord' ) _UpperCAmelCase = get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): _UpperCAmelCase = serialized_examples[i] out_file.write(A ) print('Wrote file {} containing {} records'.format(A , A ) ) shard_count += 1 total_records += records_containing with open(f'split-{args.split}-records-count.txt' , 'w' ) as f: print(f'Total {args.split} records: {total_records}' , file=A ) if __name__ == "__main__": lowercase = parse_args() main(args)
24
"""Fast (CPU) and slow (GPU) tests for the class-conditional DiT diffusion pipeline."""
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Tiny-model smoke tests driven by the common pipeline tester mixin."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    # DiT does not expose these optional pipeline arguments.
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATch_PARAMS if False else CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the obfuscated source only shows an anonymous `False` class
    # attribute here; `test_cpu_offload` matches the mixin flags of this
    # diffusers era — confirm against PipelineTesterMixin.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny transformer/VAE/scheduler trio small enough for CPU tests."""
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; MPS needs a CPU-side generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Full-checkpoint tests; require CUDA, network access, and are marked slow."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # The 512 checkpoint is evaluated with the multistep DPM solver.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
24
1
"""Pinned dependency specifiers for the package's setup tooling."""

# Mapping: bare package name -> pip requirement specifier.
# Insertion order is preserved deliberately (it mirrors setup.py extras).
lowercase = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
24
"""Compute the decimal digit sum of an integer, three ways, with a benchmark."""


def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign is ignored).

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    >>> sum_of_digits(0)
    0
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of :func:`sum_of_digits`.

    >>> sum_of_digits_recursion(12345)
    15
    >>> sum_of_digits_recursion(-123)
    6
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner variant via string conversion.

    >>> sum_of_digits_compact(12345)
    15
    >>> sum_of_digits_compact(-123)
    6
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each implementation on 2**18, 2**50 and 2**100."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # `timeit` re-imports this module, so the call string must be
        # qualified with `__main__` for the timed statement to resolve.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
24
1
"""simple docstring""" import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowercase = [ '''kernels/rwkv/wkv_cuda.cu''', '''kernels/rwkv/wkv_op.cpp''', '''kernels/deformable_detr/ms_deform_attn.h''', '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''', '''models/graphormer/algos_graphormer.pyx''', ] def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''') lowercase = parser.parse_args() if args.check_lib: lowercase = importlib.import_module('''transformers''') lowercase = Path(transformers_module.__file__).parent else: lowercase = Path.cwd() / '''build/lib/transformers''' if not test_custom_files_are_present(transformers_path): raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
24
"""Generate all k-element combinations of {1, ..., n} via backtracking."""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every sorted ``k``-element combination drawn from ``1..n``.

    >>> generate_all_combinations(4, 2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: extend *current_list* until *level* picks remain.

    Appends a copy of each completed combination to *total_list*.
    """
    if level == 0:
        # Copy — current_list keeps being mutated by the caller's loop.
        total_list.append(current_list[:])
        return

    # Upper bound leaves enough room for the remaining `level - 1` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line, space-separated."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
24
1
"""Feature extractor for TVLT: turns raw mono audio into padded log-mel patch grids."""
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel spectrograms plus an audio patch mask for TVLT.

    Args:
        spectrogram_length: max number of time frames kept per spectrogram.
        num_channels: number of audio channels (mono only).
        patch_size: ``[time, freq]`` patch shape used by the model.
        feature_size: number of mel bins.
        sampling_rate: expected input sampling rate in Hz.
        hop_length_to_sampling_rate: ratio ``sampling_rate / hop_length``.
        n_fft: FFT window size.
        padding_value: fill value used for padded time frames.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of frequency patches per time frame.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # Slaney-normalized mel filter bank, transposed for `spectrogram`'s layout.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram, rescaled into roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]  # drop the trailing frame
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms.

        Returns a ``BatchFeature`` with ``audio_values`` of shape
        ``(batch, 1, max_time_len, feature_size)`` and, when
        ``return_attention_mask`` is set, a per-patch ``audio_mask``.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # The maximum number of audio patches in a batch.
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )
        # Create audio attention mask: 1 for real patches, 0 for padding.
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # Convert into the padded (batch, channel, time, mel) layout.
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # maximum audio size in the batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
24
"""Smoke tests for the legacy seq2seq `run_eval.py` / `run_eval_search.py` scripts."""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list) -> None:
    """Write *articles* joined by newlines to *path* (handle closed on exit)."""
    content = "\n".join(articles)
    with Path(path).open("w") as f:
        f.write(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        """Run `run_eval.py` on a one-line input file and check it writes output."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        # t5 tiny is a translation model; the bart/mbart tinies summarize.
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # Any extra models should go into this list — they run under @slow.
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # Test grid search with one translation (t5) and one summarization (mbart) model.
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
1
"""Tests for the Flax RoFormer models."""
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config plus random inputs for the common mixin."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): the obfuscated source only shows an anonymous `True` class
    # attribute here; `test_head_masking` matches this test file's era — confirm.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
24
"""Dataset-combination helpers: interleave or concatenate (iterable) datasets."""
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one.

    Args:
        datasets: non-empty list of `Dataset` or `IterableDataset` objects.
        probabilities: optional sampling probabilities per source dataset.
        seed: RNG seed used when sampling with `probabilities`.
        info: `DatasetInfo` for the resulting dataset.
        split: `NamedSplit` for the resulting dataset.
        stopping_strategy: stop when the first source is exhausted
            ("first_exhausted") or only when all are ("all_exhausted").

    Raises:
        ValueError: on an empty list, mixed dataset kinds, dataset dicts,
            or an unknown stopping strategy.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes which kind (map-style vs iterable) is expected.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) into one.

    Args:
        dsets: non-empty list of `Dataset` or `IterableDataset` objects.
        info: `DatasetInfo` for the resulting dataset.
        split: `NamedSplit` for the resulting dataset.
        axis: 0 to stack rows, 1 to join columns side by side.

    Raises:
        ValueError: on an empty list, mixed dataset kinds, or dataset dicts.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                # NOTE(review): the following two messages say "interleave" even in
                # the concatenate path — preserved as-is from the original wording.
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
24
1
"""Tests for `datasets.io.json`: reading JSON (Lines) files into `Dataset`/`DatasetDict`
objects and writing datasets back out in the various pandas JSON orientations.

Relies on conftest fixtures: `jsonl_path` (4 rows, columns col_1/col_2/col_3),
`jsonl_312_path` (2 rows, columns in col_3/col_1/col_2 order), `dataset` (10 rows),
`shared_datadir`, `tmp_path` and `tmp_path_factory`.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    # Shared sanity checks for a dataset read from the `jsonl_path` fixture (4 rows, 3 columns).
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading with keep_in_memory=True must allocate Arrow memory; False must not."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inferred defaults) must be honoured on read."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3, col_1, col_2) must be preserved."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Features given in a different order than the file's columns must win."""
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The requested split name is attached to the dataset; default is 'train'."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """A single path and a list of paths must both be accepted."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    # Shared sanity checks for a DatasetDict built from the `jsonl_path` fixture.
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each key of the path mapping becomes a split; None defaults to train+test."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    # Parse a whole-file JSON document.
    return json.load(buffer)


def load_json_lines(buffer):
    # Parse one JSON document per line (JSON Lines).
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
24
"""Tests for the `text2text-generation` pipeline."""
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    # Model mappings consumed by the shared pipeline test harness.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance and sample inputs for the generic harness."""
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        # Non-string input is rejected.
        # NOTE(review): exception type reconstructed from upstream — confirm it is ValueError.
        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        # Give the tokenizer a pad token so that batched generation works.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"

        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
24
1
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
"""Count integer partitions with a bottom-up dynamic program."""


def partition(m: int) -> int:
    """Return the number of integer partitions of ``m``.

    memo[n][k] counts the partitions of ``n`` restricted by the largest-part
    index ``k``; the answer is read from ``memo[m][m - 1]``.

    Requires ``m >= 1`` (the table needs at least one column).
    """
    # Table of (m + 1) rows by m columns, initialised so that every n has
    # exactly one partition when k == 0 (the base case of the recurrence).
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            # Partitions that do not use part index k ...
            memo[n][k] += memo[n][k - 1]
            # ... plus those that do (reduce n accordingly).
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
24
1
"""Project Euler 35: count the circular primes below one million.

A circular prime stays prime under every rotation of its digits.
"""
from __future__ import annotations

# Sieve of Eratosthenes up to 1_000_000, built once at import time.
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if ``n`` (0 <= n <= 1_000_000) is prime, via the sieve."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if any decimal digit of ``n`` is even.

    Any multi-digit number containing an even digit has a rotation ending in
    that digit, which is divisible by 2 — so it cannot be a circular prime.
    """
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes up to and including ``limit``."""
    result = [2]  # result already includes the number 2.
    # Only odd candidates: every other even number fails the rotation test.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the count of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
24
"""Project Euler 89: characters saved by writing roman numerals in minimal form."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) roman numeral string to an integer.

    Uses the subtractive rule: a symbol smaller than its successor is
    subtracted from the total, otherwise added. Assumes ``numerals`` is
    non-empty and contains only keys of ``SYMBOLS``.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The last symbol is always added.
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert a positive integer to its minimal roman numeral representation."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total characters saved by rewriting each numeral in the
    data file (one numeral per line) in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
24
1
"""Convert original DETA (Swin-large backbone) checkpoints to the Hugging Face format."""


import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image

from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_deta_config(model_name):
    """Build the DetaConfig (Swin-large backbone) and label maps for `model_name`."""
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels: Object365 classes for the o365 checkpoint, COCO otherwise.
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    """Return (src, dest) pairs mapping original DETA/Swin parameter names to HF names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
    # fmt: on

    # stages: each Swin block contributes the same fixed set of (src, dest) suffixes
    block_suffixes = [
        ("norm1.weight", "layernorm_before.weight"),
        ("norm1.bias", "layernorm_before.bias"),
        ("attn.relative_position_bias_table", "attention.self.relative_position_bias_table"),
        ("attn.relative_position_index", "attention.self.relative_position_index"),
        ("attn.proj.weight", "attention.output.dense.weight"),
        ("attn.proj.bias", "attention.output.dense.bias"),
        ("norm2.weight", "layernorm_after.weight"),
        ("norm2.bias", "layernorm_after.bias"),
        ("mlp.fc1.weight", "intermediate.dense.weight"),
        ("mlp.fc1.bias", "intermediate.dense.bias"),
        ("mlp.fc2.weight", "output.dense.weight"),
        ("mlp.fc2.bias", "output.dense.bias"),
    ]
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            for src, dest in block_suffixes:
                rename_keys.append(
                    (
                        f"backbone.0.body.layers.{i}.blocks.{j}.{src}",
                        f"model.backbone.model.encoder.layers.{i}.blocks.{j}.{dest}",
                    )
                )
        # the last stage has no downsample layer
        if i < 3:
            for suffix in ("downsample.reduction.weight", "downsample.norm.weight", "downsample.norm.bias"):
                rename_keys.append(
                    (f"backbone.0.body.layers.{i}.{suffix}", f"model.backbone.model.encoder.layers.{i}.{suffix}")
                )

    # per-stage output norms (stages 2-4 are the requested out_features)
    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))

    # transformer encoder
    encoder_suffixes = [
        ("self_attn.sampling_offsets.weight", "self_attn.sampling_offsets.weight"),
        ("self_attn.sampling_offsets.bias", "self_attn.sampling_offsets.bias"),
        ("self_attn.attention_weights.weight", "self_attn.attention_weights.weight"),
        ("self_attn.attention_weights.bias", "self_attn.attention_weights.bias"),
        ("self_attn.value_proj.weight", "self_attn.value_proj.weight"),
        ("self_attn.value_proj.bias", "self_attn.value_proj.bias"),
        ("self_attn.output_proj.weight", "self_attn.output_proj.weight"),
        ("self_attn.output_proj.bias", "self_attn.output_proj.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm2.weight", "final_layer_norm.weight"),
        ("norm2.bias", "final_layer_norm.bias"),
    ]
    for i in range(config.encoder_layers):
        for src, dest in encoder_suffixes:
            rename_keys.append((f"transformer.encoder.layers.{i}.{src}", f"model.encoder.layers.{i}.{dest}"))

    # transformer decoder (cross_attn -> encoder_attn, norms renamed per HF layout)
    decoder_suffixes = [
        ("cross_attn.sampling_offsets.weight", "encoder_attn.sampling_offsets.weight"),
        ("cross_attn.sampling_offsets.bias", "encoder_attn.sampling_offsets.bias"),
        ("cross_attn.attention_weights.weight", "encoder_attn.attention_weights.weight"),
        ("cross_attn.attention_weights.bias", "encoder_attn.attention_weights.bias"),
        ("cross_attn.value_proj.weight", "encoder_attn.value_proj.weight"),
        ("cross_attn.value_proj.bias", "encoder_attn.value_proj.bias"),
        ("cross_attn.output_proj.weight", "encoder_attn.output_proj.weight"),
        ("cross_attn.output_proj.bias", "encoder_attn.output_proj.bias"),
        ("norm1.weight", "encoder_attn_layer_norm.weight"),
        ("norm1.bias", "encoder_attn_layer_norm.bias"),
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("norm2.weight", "self_attn_layer_norm.weight"),
        ("norm2.bias", "self_attn_layer_norm.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm3.weight", "final_layer_norm.weight"),
        ("norm3.bias", "final_layer_norm.bias"),
    ]
    for i in range(config.decoder_layers):
        for src, dest in decoder_suffixes:
            rename_keys.append((f"transformer.decoder.layers.{i}.{src}", f"model.decoder.layers.{i}.{dest}"))

    return rename_keys


def rename_key(dct, old, new):
    """Move entry `old` to key `new` in `dct`."""
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin attention qkv matrix into query/key/value entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            # NOTE(review): destination key names reconstructed from the HF Swin layout — confirm against modeling_swin.
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    """Split each fused decoder self-attention in_proj matrix into q/k/v projections."""
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): destination key names reconstructed from the HF DETA layout — confirm against modeling_deta.
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an original DETA checkpoint, remap it into a `DetaForObjectDetection`,
    verify a forward pass against reference outputs, and optionally save/push it."""
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    # NOTE(review): destination key patterns reconstructed — confirm against the HF DETA state dict.
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
24
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } _UpperCAmelCase = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 128, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 142, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = 
np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , 
reshape(snake_case , (12, 5) ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) ) _UpperCAmelCase = np.random.randn(3 , 4 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = tf.constant(snake_case ) 
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(1 , 3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) ) _UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = torch.tensor(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_tf def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = tf.constant(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) ) @require_flax def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = np.random.randn(3 , 4 ) _UpperCAmelCase = jnp.array(snake_case ) self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
24
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use DeformableDetrImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase = { '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''], '''tokenization_m2m_100''': ['''M2M100Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''', '''M2M100ForConditionalGeneration''', '''M2M100Model''', '''M2M100PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, 
RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
24
1
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def UpperCAmelCase ( A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = np.max(_outputs , axis=-1 , keepdims=A ) _UpperCAmelCase = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=A ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''sigmoid''' _UpperCAmelCase = '''softmax''' _UpperCAmelCase = '''none''' @add_end_docstrings( A, R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. 
''', ) class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = ClassificationFunction.NONE def __init__( self , **snake_case ) -> List[str]: super().__init__(**snake_case ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def lowerCamelCase_ ( self , snake_case=None , snake_case=None , snake_case="" , **snake_case ) -> Optional[Any]: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" _UpperCAmelCase = tokenizer_kwargs _UpperCAmelCase = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: _UpperCAmelCase = self.model.config.return_all_scores if isinstance(snake_case , snake_case ) or top_k is None: _UpperCAmelCase = top_k _UpperCAmelCase = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , snake_case , ) if return_all_scores: _UpperCAmelCase = None else: _UpperCAmelCase = 1 if isinstance(snake_case , snake_case ): _UpperCAmelCase = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: _UpperCAmelCase = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *snake_case , **snake_case ) -> Dict: _UpperCAmelCase = super().__call__(*snake_case , **snake_case ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_UpperCAmelCase = 'top_k' not in kwargs if isinstance(args[0] , snake_case ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def lowerCamelCase_ ( self , snake_case , **snake_case ) -> Dict[str, GenericTensor]: _UpperCAmelCase = self.framework if isinstance(snake_case , snake_case ): return self.tokenizer(**snake_case , return_tensors=snake_case , **snake_case ) elif isinstance(snake_case , snake_case ) and len(snake_case ) == 1 and isinstance(inputs[0] , snake_case ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=snake_case , **snake_case ) elif isinstance(snake_case , snake_case ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(snake_case , return_tensors=snake_case , **snake_case ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: return self.model(**snake_case ) def lowerCamelCase_ ( self , snake_case , snake_case=None , snake_case=1 , snake_case=True ) -> List[Any]: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. 
# Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: _UpperCAmelCase = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: _UpperCAmelCase = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: _UpperCAmelCase = self.model.config.function_to_apply else: _UpperCAmelCase = ClassificationFunction.NONE _UpperCAmelCase = model_outputs['logits'][0] _UpperCAmelCase = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: _UpperCAmelCase = sigmoid(snake_case ) elif function_to_apply == ClassificationFunction.SOFTMAX: _UpperCAmelCase = softmax(snake_case ) elif function_to_apply == ClassificationFunction.NONE: _UpperCAmelCase = outputs else: raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} _UpperCAmelCase = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(snake_case ) ] if not _legacy: dict_scores.sort(key=lambda snake_case : x["score"] , reverse=snake_case ) if top_k is not None: _UpperCAmelCase = dict_scores[:top_k] return dict_scores
24
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowercase = logging.get_logger(__name__) class lowercase__ ( A ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ) -> None: warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , snake_case , ) super().__init__(*snake_case , **snake_case )
24
1
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class lowercase__ ( _lowerCamelCase ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ) -> Optional[int]: if tokenize_kwargs is None: _UpperCAmelCase = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) _UpperCAmelCase = truncation _UpperCAmelCase = tokenize_kwargs _UpperCAmelCase = {} if return_tensors is not None: _UpperCAmelCase = return_tensors return preprocess_params, {}, postprocess_params def lowerCamelCase_ ( self , snake_case , **snake_case ) -> Dict[str, GenericTensor]: _UpperCAmelCase = self.framework _UpperCAmelCase = self.tokenizer(A__ , return_tensors=A__ , **A__ ) return model_inputs def lowerCamelCase_ ( self , snake_case ) -> Optional[int]: _UpperCAmelCase = self.model(**A__ ) return model_outputs def lowerCamelCase_ ( self , snake_case , snake_case=False ) -> Optional[Any]: # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *snake_case , **snake_case ) -> List[str]: return super().__call__(*A__ , **A__ )
700
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase = logging.get_logger(__name__) lowercase = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = '''beit''' def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str: super().__init__(**snake_case ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # 
auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = version.parse('''1.11''' ) @property def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase_ ( self ) -> float: return 1E-4
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available lowercase = { '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ErnieForCausalLM''', '''ErnieForMaskedLM''', '''ErnieForMultipleChoice''', '''ErnieForNextSentencePrediction''', '''ErnieForPreTraining''', '''ErnieForQuestionAnswering''', '''ErnieForSequenceClassification''', '''ErnieForTokenClassification''', '''ErnieModel''', '''ErniePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
701
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowercase = logging.getLogger(__name__) if __name__ == "__main__": lowercase = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=3_05_22, type=int) lowercase = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: lowercase = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') lowercase = Counter() for tk_ids in data: counter.update(tk_ids) lowercase = [0] * args.vocab_size for k, v in counter.items(): lowercase = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
0
"""simple docstring""" import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowercase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=None , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , snake_case=False , ) -> str: _UpperCAmelCase = size if size is not None else {'height': 20, 'width': 20} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_reduce_labels def lowerCamelCase_ ( self ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) _UpperCAmelCase = Image.open(dataset[0]['file'] ) _UpperCAmelCase = Image.open(dataset[1]['file'] ) return image, map def UpperCAmelCase ( ): '''simple 
docstring''' _UpperCAmelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) _UpperCAmelCase = Image.open(ds[0]['file'] ) _UpperCAmelCase = Image.open(ds[1]['file'] ) _UpperCAmelCase = Image.open(ds[2]['file'] ) _UpperCAmelCase = Image.open(ds[3]['file'] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class lowercase__ ( snake_case__, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BeitImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = BeitImageProcessingTester(self ) @property def lowerCamelCase_ ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 'center_crop' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , 'image_std' ) ) def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 20, 'width': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ ) _UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase_ ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ ) def lowerCamelCase_ ( self ) -> Dict: pass def 
lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCamelCase_ ( self ) -> Tuple: 
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) _UpperCAmelCase = [] for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) 
self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test not batched input (PIL images) _UpperCAmelCase , _UpperCAmelCase = prepare_semantic_single_inputs() _UpperCAmelCase = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 1, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) # Test batched input (PIL images) _UpperCAmelCase , _UpperCAmelCase = prepare_semantic_batch_inputs() _UpperCAmelCase = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' ) self.assertEqual( encoding['pixel_values'].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual( encoding['labels'].shape , ( 2, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ) , ) self.assertEqual(encoding['labels'].dtype , torch.long ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 ) def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 _UpperCAmelCase , _UpperCAmelCase = prepare_semantic_single_inputs() _UpperCAmelCase = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 150 ) _UpperCAmelCase = True _UpperCAmelCase = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' ) self.assertTrue(encoding['labels'].min().item() >= 0 ) self.assertTrue(encoding['labels'].max().item() <= 255 )
702
"""simple docstring"""

from itertools import permutations


def UpperCAmelCase(A: tuple):
    """Return True if the 0-9 pandigital digit tuple ``A`` has Project Euler 43's
    substring-divisibility property.

    The three-digit windows d2d3d4 ... d8d9d10 must be divisible by
    2, 3, 5, 7, 11, 13 and 17 respectively.
    """
    # d2d3d4 % 2 == 0  <=>  its last digit d4 is even.
    if A[3] % 2 != 0:
        return False
    # d3d4d5 % 3 == 0  <=>  the digit sum of the window is divisible by 3.
    if (A[2] + A[3] + A[4]) % 3 != 0:
        return False
    # d4d5d6 % 5 == 0  <=>  its last digit d6 is 0 or 5.
    if A[5] % 5 != 0:
        return False
    # Remaining three-digit windows are checked directly against 7, 11, 13, 17.
    # (The original body iterated the digit tuple itself, which both indexed
    # out of range and never used these divisors.)
    _UpperCAmelCase = [7, 11, 13, 17]
    for i, test in enumerate(_UpperCAmelCase):
        if (A[i + 4] * 100 + A[i + 5] * 10 + A[i + 6]) % test != 0:
            return False
    return True


# The next definition rebinds ``UpperCAmelCase``; keep the predicate reachable.
is_substring_divisible = UpperCAmelCase


def UpperCAmelCase(A: int = 10):
    """Sum all pandigital numbers over the digits 0..A-1 that satisfy the
    substring-divisibility property (Project Euler problem 43)."""
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(A))
        if is_substring_divisible(num)
    )


# Alias so the __main__ banner below resolves and still prints ``solution() = ...``.
solution = UpperCAmelCase


if __name__ == "__main__":
    print(F'''{solution() = }''')
24
0
"""simple docstring"""

from typing import List

import numpy as np


def UpperCAmelCase(A: dict):
    """Return the number of shards implied by the list-valued entries of ``A``.

    Every ``list`` value in ``A`` (a generator's ``gen_kwargs``) is treated as a
    sharded data source; all such lists must have the same length, otherwise
    sharding is ambiguous. Non-list values are broadcast to every shard.
    """
    lists_lengths = {key: len(value) for key, value in A.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items())
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


_number_of_shards_in_gen_kwargs = UpperCAmelCase  # keep reachable after the name is rebound below


def UpperCAmelCase(num_shards: int, max_num_jobs: int):
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous ranges."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # Spread the remainder over the first (num_shards % max_num_jobs) groups.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


_distribute_shards = UpperCAmelCase  # keep reachable after the name is rebound below


def UpperCAmelCase(gen_kwargs: dict, max_num_jobs: int):
    """Split ``gen_kwargs`` into at most ``max_num_jobs`` kwargs dicts, sharding list values."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


_split_gen_kwargs = UpperCAmelCase  # keep reachable after the name is rebound below


def UpperCAmelCase(gen_kwargs_list: List[dict]):
    """Merge split kwargs dicts back together: list values are concatenated in
    order; non-list values are taken from the first dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


_merge_gen_kwargs = UpperCAmelCase  # keep reachable after the name is rebound below


def UpperCAmelCase(rng: np.random.Generator, gen_kwargs: dict):
    """Return a copy of ``gen_kwargs`` whose list values are shuffled with ``rng``.

    Lists of equal length receive the same permutation so that parallel data
    sources stay aligned.
    """
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
703
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it defines.
# (The original file referenced ``_import_structure`` in ``_LazyModule`` but the
# dict had been renamed to ``lowercase`` and then clobbered by list assignments.)
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

# The fast tokenizer is only registered when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']

# The PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module stays lazy.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
24
0
"""simple docstring"""

import os

# Precomputes a list of the 100 first triangular numbers T(n) = n * (n + 1) / 2.
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]


def UpperCAmelCase() -> int:
    """Project Euler 42: count the "triangle words" in ``words.txt``.

    A word's value is the sum of its letters' alphabetical positions
    ('A' == 1); a triangle word is one whose value is a triangular number.
    """
    # Resolve words.txt next to this script, independent of the working directory.
    # (The original body referenced the unbound name ``_lowercase`` here.)
    script_dir = os.path.dirname(os.path.realpath(__file__))
    word_file_path = os.path.join(script_dir, 'words.txt')

    words = ''
    with open(word_file_path) as f:
        words = f.readline()

    # The file is a single line of comma-separated, double-quoted words.
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    # ord(letter) - 64 maps 'A'..'Z' to 1..26 (assumes upper-case input).
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    # Set membership is O(1) per lookup vs. scanning the list each time.
    triangle_numbers = set(lowercase)
    return len([value for value in word_values if value in triangle_numbers])


# Alias so the __main__ guard below resolves (the def above shadows nothing else).
solution = UpperCAmelCase


if __name__ == "__main__":
    print(solution())
704
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it defines.
# (The original file referenced ``_import_structure`` in ``_LazyModule`` but the
# dict had been renamed to ``lowercase`` and then clobbered by a list assignment.)
_import_structure = {
    'configuration_clipseg': [
        'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPSegConfig',
        'CLIPSegTextConfig',
        'CLIPSegVisionConfig',
    ],
    'processing_clipseg': ['CLIPSegProcessor'],
}

# The PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_clipseg'] = [
        'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPSegModel',
        'CLIPSegPreTrainedModel',
        'CLIPSegTextModel',
        'CLIPSegVisionModel',
        'CLIPSegForImageSegmentation',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module stays lazy.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
24
0
"""simple docstring"""

import os


def UpperCAmelCase(A: list):
    """Return the greatest product of four adjacent numbers (up, down, left,
    right, or diagonally) in the square grid ``A`` (Project Euler problem 11).

    The original body referenced the unbound names ``grid``/``_A``; internal
    names are restored here.
    """
    n_columns = len(A[0])
    n_rows = len(A)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = A[j][i] * A[j + 1][i] * A[j + 2][i] * A[j + 3][i]
            horz_product = A[i][j] * A[i][j + 1] * A[i][j + 2] * A[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    A[i][j] * A[i + 1][j + 1] * A[i + 2][j + 2] * A[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    A[i][j] * A[i - 1][j + 1] * A[i - 2][j + 2] * A[i - 3][j + 3]
                )
            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


# The next definition rebinds ``UpperCAmelCase``; keep the grid scan reachable.
largest_product = UpperCAmelCase


def UpperCAmelCase():
    """Read the 20x20 grid from ``grid.txt`` next to this script and return the
    largest product of four adjacent numbers."""
    grid = []
    with open(os.path.dirname(__file__) + '/grid.txt') as file:
        for line in file:
            grid.append(line.strip('\n').split(' '))
    grid = [[int(cell) for cell in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


# Alias so the __main__ guard below resolves.
solution = UpperCAmelCase


if __name__ == "__main__":
    print(solution())
705
"""simple docstring"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


# NOTE(review): the original file bound both the logger and the archive map to
# ``lowercase`` (the map wins); the rebinding is kept for compatibility.
lowercase = logging.get_logger(__name__)

lowercase = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class lowercase__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin Transformer model (SwinConfig).

    The original bases were obfuscated to the undefined name ``A`` and the
    ``model_type``/``attribute_map`` class attributes had collapsed into a
    single shadowed name; both are restored so ``PretrainedConfig`` works.
    """

    model_type = 'swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        # Parameter names restored from the (intact) right-hand sides of the
        # original assignments; the obfuscated signature repeated ``snake_case``
        # for every parameter, which is a SyntaxError.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self.out_features, self.out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class lowercase__(OnnxConfig):
    """ONNX export configuration for Swin (SwinOnnxConfig).

    NOTE(review): both properties were obfuscated to the same name
    ``lowerCamelCase_`` (the second shadowed the first); the conventional
    ``OnnxConfig`` property names are restored.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input, channels-first.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX graph.
        return 1E-4
24
0
"""simple docstring"""

# Placeholder ("dummy") objects: each class stands in for a Flax-backed object
# and raises a helpful error via `requires_backends` when Flax is missing.
#
# Fixes vs. the damaged original: the metaclass referenced the undefined name
# ``__lowerCamelCase`` instead of the imported ``DummyObject``; ``*snake_case,
# **snake_case`` repeated a parameter name (a SyntaxError); the ``_backends``
# class attribute (required by ``DummyObject``) had lost its name; and the
# return annotations referenced unimported typing names. NOTE(review): all
# classes still share the name ``lowercase__`` (rename damage), so only the
# last binding is importable — the real class names could not be recovered.
from ..utils import DummyObject, requires_backends


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])


class lowercase__(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
706
"""simple docstring"""

from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class lowercase__(nn.Module):
    """Dual-transformer wrapper (DualTransformer2DModel-style): runs two
    Transformer2D blocks over two slices of the text conditioning and mixes
    their outputs.

    The obfuscated original repeated the parameter name ``snake_case`` in both
    signatures (a SyntaxError) and named the forward pass ``lowerCamelCase_``,
    which breaks ``nn.Module.__call__``; names are restored from the intact
    keyword arguments in the body.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        # Two identical transformer blocks; each one encodes one condition slice.
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode each condition slice with its assigned transformer, mix the
        residuals with ``mix_ratio`` and add back the input states.

        Returns a ``TransformeraDModelOutput`` (or a 1-tuple when
        ``return_dict`` is False).
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual each transformer contributes.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
24
0
"""simple docstring"""

import math


def UpperCAmelCase(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power P = S * pf for apparent power S and
    power factor pf.

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


# The next definition rebinds ``UpperCAmelCase``; keep this one reachable.
real_power = UpperCAmelCase


def UpperCAmelCase(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power Q = S * sqrt(1 - pf**2).

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)


reactive_power = UpperCAmelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
"""simple docstring"""
# Test suite for the CvT (Convolutional vision Transformer) model.
#
# NOTE(review): identifiers in this file were damaged by an automated rename:
# every class is called `lowercase__`, every test method `lowerCamelCase_`,
# signatures repeat the parameter name `snake_case` (a SyntaxError in Python),
# and bodies reference the original, now-unbound names (`parent`, `config`,
# `model`, `CvtModelTester`, ...). Code is reproduced verbatim; comments only
# flag the damage — TODO restore the real identifiers from upstream.
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# Config-tester mixin: asserts CvtConfig exposes the Cvt-specific attributes.
# NOTE(review): the base class `A` is undefined here (rename damage).
class lowercase__ ( A ):
    '''simple docstring'''

    def lowerCamelCase_ ( self ) -> int:
        # NOTE(review): `snake_case` is unbound below; presumably the freshly
        # built `config` was meant to be checked — confirm against upstream.
        _UpperCAmelCase = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) )


# Model tester: builds a tiny CvtConfig plus random inputs for the common suite.
class lowercase__ :
    '''simple docstring'''

    # NOTE(review): repeated `snake_case` parameters are a SyntaxError; the
    # right-hand sides below preserve the intended parameter names/order.
    def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = image_size
        _UpperCAmelCase = patch_sizes
        _UpperCAmelCase = patch_stride
        _UpperCAmelCase = patch_padding
        _UpperCAmelCase = is_training
        _UpperCAmelCase = use_labels
        _UpperCAmelCase = num_labels
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = embed_dim
        _UpperCAmelCase = num_heads
        _UpperCAmelCase = stride_kv
        _UpperCAmelCase = depth
        _UpperCAmelCase = cls_token
        _UpperCAmelCase = attention_drop_rate
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = layer_norm_eps

    # Builds (config, pixel_values, labels) with random tensors.
    def lowerCamelCase_ ( self ) -> Dict:
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase = None
        if self.use_labels:
            _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        _UpperCAmelCase = self.get_config()
        return config, pixel_values, labels

    # Returns a CvtConfig mirroring this tester's hyper-parameters.
    def lowerCamelCase_ ( self ) -> List[str]:
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    # Runs the base model and checks the output feature-map shape stage by stage.
    def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
        _UpperCAmelCase = CvtModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        _UpperCAmelCase = model(snake_case )
        _UpperCAmelCase = (self.image_size, self.image_size)
        _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            # Conv-embedding output size: floor((in + 2*pad - kernel) / stride) + 1.
            _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    # Runs the classification head and checks the logits shape.
    def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
        _UpperCAmelCase = self.num_labels
        _UpperCAmelCase = CvtForImageClassification(snake_case )
        model.to(snake_case )
        model.eval()
        _UpperCAmelCase = model(snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Adapts prepare_config_and_inputs() to the common-test dict format.
    def lowerCamelCase_ ( self ) -> Union[str, Any]:
        _UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
        _UpperCAmelCase = {'pixel_values': pixel_values}
        return config, inputs_dict


# Common model tests for Cvt. NOTE(review): bases obfuscated to undefined `A`
# (originally the ModelTesterMixin/PipelineTesterMixin imported above).
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): these class attributes all share one name, so only the last
    # survives (originally all_model_classes, pipeline_model_mapping, and the
    # various test_* feature flags).
    _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    # setUp: builds the model tester and the config tester.
    def lowerCamelCase_ ( self ) -> Union[str, Any]:
        _UpperCAmelCase = CvtModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )

    # Exercises the full ConfigTester battery.
    def lowerCamelCase_ ( self ) -> Union[str, Any]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self ) -> Union[str, Any]:
        return

    @unittest.skip(reason='Cvt does not output attentions' )
    def lowerCamelCase_ ( self ) -> str:
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def lowerCamelCase_ ( self ) -> int:
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def lowerCamelCase_ ( self ) -> Union[str, Any]:
        pass

    # Checks every model's forward() signature starts with `pixel_values`.
    def lowerCamelCase_ ( self ) -> Any:
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , snake_case )

    def lowerCamelCase_ ( self ) -> Optional[int]:
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    # Verifies hidden_states count and the first stage's feature-map shape.
    def lowerCamelCase_ ( self ) -> Optional[int]:
        # NOTE(review): the nested helper also repeats `snake_case` (SyntaxError).
        def check_hidden_states_output(snake_case , snake_case , snake_case ):
            _UpperCAmelCase = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
            _UpperCAmelCase = outputs.hidden_states
            _UpperCAmelCase = len(self.model_tester.depth )
            self.assertEqual(len(snake_case ) , snake_case )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            check_hidden_states_output(snake_case , snake_case , snake_case )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase = True
            check_hidden_states_output(snake_case , snake_case , snake_case )

    def lowerCamelCase_ ( self ) -> Any:
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowerCamelCase_ ( self ) -> Dict:
        pass

    # Smoke-test loading the first pretrained checkpoint from the hub.
    @slow
    def lowerCamelCase_ ( self ) -> Dict:
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = CvtModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )


# Loads the fixture image used by the integration test below.
def UpperCAmelCase ( ):
    '''simple docstring'''
    _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# End-to-end integration test against the reference checkpoint's logits.
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def lowerCamelCase_ ( self ) -> List[Any]:
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def lowerCamelCase_ ( self ) -> Dict:
        _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
        # forward pass
        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case )
        # verify the logits
        _UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case )
        _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
0
"""simple docstring"""


class lowercase__:
    """Disjoint-set (union-find) with union by rank and path compression.

    Each element starts in its own set with an initial size taken from
    ``set_counts``; ``max_set`` always holds the size of the largest set.

    The obfuscated original repeated the parameter name ``snake_case``
    (a SyntaxError) and gave both methods the same name, so the merge method
    was shadowed and its ``self.get_parent`` call could never resolve;
    ``get_parent`` is restored as a distinct method name.
    """

    def __init__(self, snake_case) -> None:
        # snake_case: initial element count of each set, one entry per element.
        self.set_counts = snake_case
        self.max_set = max(snake_case)
        num_sets = len(snake_case)
        self.ranks = [1] * num_sets
        # Every element is initially its own root.
        self.parents = list(range(num_sets))

    def lowerCamelCase_(self, src, dst) -> bool:
        """Union the sets containing ``src`` and ``dst``.

        Returns False (no-op) when they are already in the same set, True
        otherwise. Updates ``max_set`` with the size of the joined set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        # Union by rank: attach the lower-ranked root under the higher-ranked one.
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set) -> int:
        """Return the root of ``disj_set``, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root for future lookups.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
708
"""simple docstring""" from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( A : int , A : int , A : int ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) _UpperCAmelCase = b * b - 4 * a * c _UpperCAmelCase = (-b + sqrt(A )) / (2 * a) _UpperCAmelCase = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(f'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
24
0
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class lowercase__ : '''simple docstring''' def __init__( self , snake_case ) -> Optional[int]: _UpperCAmelCase = str(id_ ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = [] _UpperCAmelCase = {} # {vertex:distance} def __lt__( self , snake_case ) -> Dict: return self.key < other.key def __repr__( self ) -> Any: return self.id def lowerCamelCase_ ( self , snake_case ) -> Any: self.neighbors.append(_lowerCAmelCase ) def lowerCamelCase_ ( self , snake_case , snake_case ) -> Union[str, Any]: _UpperCAmelCase = weight def UpperCAmelCase ( A : Optional[int] , A : str , A : Optional[int] , A : Dict ): '''simple docstring''' graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , __lowerCAmelCase ) graph[b - 1].add_edge(graph[a - 1] , __lowerCAmelCase ) def UpperCAmelCase ( A : list , A : Vertex ): '''simple docstring''' _UpperCAmelCase = [] for u in graph: _UpperCAmelCase = math.inf _UpperCAmelCase = None _UpperCAmelCase = 0 _UpperCAmelCase = graph[:] while q: _UpperCAmelCase = min(__lowerCAmelCase ) q.remove(__lowerCAmelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): _UpperCAmelCase = u _UpperCAmelCase = u.edges[v.id] for i in range(1 , len(__lowerCAmelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def UpperCAmelCase ( A : list , A : Vertex ): '''simple docstring''' for u in graph: _UpperCAmelCase = math.inf _UpperCAmelCase = None _UpperCAmelCase = 0 _UpperCAmelCase = list(__lowerCAmelCase ) hq.heapify(__lowerCAmelCase ) while h: _UpperCAmelCase = hq.heappop(__lowerCAmelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): _UpperCAmelCase = u _UpperCAmelCase = u.edges[v.id] hq.heapify(__lowerCAmelCase ) for i in range(1 , len(__lowerCAmelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def 
UpperCAmelCase ( ): '''simple docstring''' pass if __name__ == "__main__": import doctest doctest.testmod()
709
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = BarthezTokenizer _UpperCAmelCase = BarthezTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCamelCase_ ( self ) -> Optional[int]: super().setUp() _UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) _UpperCAmelCase = tokenizer def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = '<pad>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(snake_case ) , 101122 ) def lowerCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCAmelCase = [0, 57, 3018, 70307, 91, 2] _UpperCAmelCase = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , 
batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = 'I was born in 92000, and this is falsé.' _UpperCAmelCase = tokenizer.tokenize(snake_case ) _UpperCAmelCase = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(snake_case ) _UpperCAmelCase = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def lowerCamelCase_ ( self ) -> Optional[int]: # fmt: off _UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french 
model. So we also use french texts. _UpperCAmelCase = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
24
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowercase = logging.get_logger(__name__) # TODO: upload to AWS lowercase = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class lowercase__ ( A ): '''simple docstring''' _UpperCAmelCase = "retribert" def __init__( self , snake_case=30522 , snake_case=768 , snake_case=8 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=128 , snake_case=0 , **snake_case , ) -> List[Any]: super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = share_encoders _UpperCAmelCase = projection_dim
710
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = DiTPipeline _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } _UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _UpperCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , ) _UpperCAmelCase = AutoencoderKL() _UpperCAmelCase = DDIMScheduler() _UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]: if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = 'cpu' _UpperCAmelCase = 
self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = self.get_dummy_inputs(snake_case ) _UpperCAmelCase = pipe(**snake_case ).images _UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1E-3 ) def lowerCamelCase_ ( self ) -> Any: self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase_ ( self ) -> Optional[int]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) _UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) 
_UpperCAmelCase = ['vase', 'umbrella'] _UpperCAmelCase = pipe.get_label_ids(snake_case ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images for word, image in zip(snake_case , snake_case ): _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
24
0
"""simple docstring""" import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib lowercase = get_logger() lowercase = None class lowercase__ ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): '''simple docstring''' def __init__( self , snake_case=None , snake_case=None , **snake_case ) -> str: super().__init__(features=__A ) import jax from jaxlib.xla_client import Device if isinstance(__A , __A ): raise ValueError( f'Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` ' 'is not serializable neither with `pickle` nor with `dill`. Instead you can surround ' 'the device with `str()` to get its string identifier that will be internally mapped ' 'to the actual `jaxlib.xla_extension.Device`.' ) _UpperCAmelCase = device if isinstance(__A , __A ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _UpperCAmelCase = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'Device with string identifier {self.device} not listed among the available ' f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ' f'device: {str(jax.devices()[0] )}.' 
) _UpperCAmelCase = str(jax.devices()[0] ) _UpperCAmelCase = jnp_array_kwargs @staticmethod def lowerCamelCase_ ( ) -> str: import jax return {str(__A ): device for device in jax.devices()} def lowerCamelCase_ ( self , snake_case ) -> Optional[int]: import jax import jax.numpy as jnp if isinstance(__A , __A ) and column: if all( isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(__A , axis=0 ) return column def lowerCamelCase_ ( self , snake_case ) -> List[str]: import jax import jax.numpy as jnp if isinstance(__A , (str, bytes, type(__A )) ): return value elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() _UpperCAmelCase = {} if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: _UpperCAmelCase = {"dtype": jnp.intaa} else: _UpperCAmelCase = {"dtype": jnp.intaa} elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): _UpperCAmelCase = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__A , PIL.Image.Image ): _UpperCAmelCase = np.asarray(__A ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: _UpperCAmelCase = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: import jax # support for torch, 
tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__A , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(__A , '__array__' ) and not isinstance(__A , jax.Array ): _UpperCAmelCase = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__A , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) elif isinstance(__A , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) return self._tensorize(__A ) def lowerCamelCase_ ( self , snake_case ) -> List[Any]: return map_nested(self._recursive_tensorize , __A , map_list=__A ) def lowerCamelCase_ ( self , snake_case ) -> Any: _UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__A ) _UpperCAmelCase = self.python_features_decoder.decode_row(__A ) return self.recursive_tensorize(__A ) def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__A ) _UpperCAmelCase = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] ) _UpperCAmelCase = self.recursive_tensorize(__A ) _UpperCAmelCase = self._consolidate(__A ) return column def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__A ) _UpperCAmelCase = self.python_features_decoder.decode_batch(__A ) _UpperCAmelCase = self.recursive_tensorize(__A ) for column_name in batch: _UpperCAmelCase = self._consolidate(batch[column_name] ) return batch
711
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) _UpperCAmelCase = 0 while n > 0: res += n % 10 n //= 10 return res def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = abs(A ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def UpperCAmelCase ( A : int ): '''simple docstring''' return sum(int(A ) for c in str(abs(A ) ) ) def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(A : Callable , A : int ) -> None: _UpperCAmelCase = f'{func.__name__}({value})' _UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' ) print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(A , A ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
24
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' ) _UpperCAmelCase = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _UpperCAmelCase = model(_a )["""last_hidden_state"""] _UpperCAmelCase = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. _UpperCAmelCase = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
712
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = [] create_all_state(1 , A , A , [] , A ) return result def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ): '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def UpperCAmelCase ( A : list[list[int]] ): '''simple docstring''' for i in total_list: print(*A ) if __name__ == "__main__": lowercase = 4 lowercase = 2 lowercase = generate_all_combinations(n, k) print_all_state(total_list)
24
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowercase__ ( _snake_case, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = KandinskyVaaInpaintPipeline _UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase = [ 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase = False @property def lowerCamelCase_ ( self ) -> Dict: return 32 @property def lowerCamelCase_ ( self ) -> Any: return 32 @property def lowerCamelCase_ ( self ) -> Union[str, Any]: return self.time_input_dim @property def lowerCamelCase_ ( self ) -> List[str]: return self.time_input_dim * 4 @property def lowerCamelCase_ ( self ) -> List[Any]: return 100 @property def lowerCamelCase_ ( self ) -> Any: torch.manual_seed(0 ) _UpperCAmelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 
'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _UpperCAmelCase = UNetaDConditionModel(**snake_case ) return model @property def lowerCamelCase_ ( self ) -> int: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase_ ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.dummy_unet _UpperCAmelCase = self.dummy_movq _UpperCAmelCase = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=snake_case , set_alpha_to_one=snake_case , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case , ) _UpperCAmelCase = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Any: _UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case ) ).to(snake_case ) _UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case ) # create init_image _UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case ) ).to(snake_case ) _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCAmelCase = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((256, 256) ) # create mask _UpperCAmelCase = np.ones((64, 64) , dtype=np.floataa ) _UpperCAmelCase = 0 if str(snake_case ).startswith('mps' ): _UpperCAmelCase = torch.manual_seed(snake_case ) else: _UpperCAmelCase = 
torch.Generator(device=snake_case ).manual_seed(snake_case ) _UpperCAmelCase = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = 'cpu' _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**snake_case ) _UpperCAmelCase = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = pipe(**self.get_dummy_inputs(snake_case ) ) _UpperCAmelCase = output.images _UpperCAmelCase = pipe( **self.get_dummy_inputs(snake_case ) , return_dict=snake_case , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] print(f'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _UpperCAmelCase = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def lowerCamelCase_ ( self ) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> List[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _UpperCAmelCase = load_image( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _UpperCAmelCase = np.ones((768, 768) , dtype=np.floataa ) _UpperCAmelCase = 0 _UpperCAmelCase = 'a hat' _UpperCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case ) _UpperCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _UpperCAmelCase = pipeline.to(snake_case ) pipeline.set_progress_bar_config(disable=snake_case ) _UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) _UpperCAmelCase , _UpperCAmelCase = pipe_prior( snake_case , generator=snake_case , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _UpperCAmelCase = pipeline( image=snake_case , mask_image=snake_case , image_embeds=snake_case , negative_image_embeds=snake_case , generator=snake_case , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _UpperCAmelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case , snake_case )
713
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) lowercase = logging.getLogger() def UpperCAmelCase ( A : Path , A : list ): '''simple docstring''' _UpperCAmelCase = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) lowercase = '''patrickvonplaten/t5-tiny-random''' lowercase = '''sshleifer/bart-tiny-random''' lowercase = '''sshleifer/tiny-mbart''' lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case ) -> str: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(snake_case , snake_case ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(snake_case , 'argv' , snake_case ): run_generate() assert Path(snake_case ).exists() # os.remove(Path(output_file_name)) def lowerCamelCase_ ( self ) -> str: self.run_eval_tester(snake_case ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> List[Any]: self.run_eval_tester(snake_case ) @parameterized.expand([T5_TINY, 
MBART_TINY] ) @slow def lowerCamelCase_ ( self , snake_case ) -> Dict: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' _UpperCAmelCase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _UpperCAmelCase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / 'scores.json' ) _UpperCAmelCase = str(tmp_dir / 'val.target' ) _dump_articles(snake_case , text['en'] ) _dump_articles(snake_case , text['de'] ) _UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization' _UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(snake_case , 'argv' , snake_case ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args'] _UpperCAmelCase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(snake_case ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(snake_case ).exists() os.remove(Path(snake_case ) )
24
0
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": lowercase = input('''Enter image url: ''').strip() print(F'''Downloading image from {url} ...''') lowercase = BeautifulSoup(requests.get(url).content, '''html.parser''') # The image URL is in the content field of the first meta tag with property og:image lowercase = soup.find('''meta''', {'''property''': '''og:image'''})['''content'''] lowercase = requests.get(image_url).content lowercase = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg''' with open(file_name, '''wb''') as fp: fp.write(image_data) print(F'''Done. Image saved to disk as {file_name}.''')
714
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowercase = logging.get_logger(__name__) lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) else: return _interleave_iterable_datasets( A , A , A , info=A , split=A , stopping_strategy=A ) def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(A ): if not isinstance(A , (Dataset, IterableDataset) ): if isinstance(A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' ) if i == 0: _UpperCAmelCase , _UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset) ) elif not isinstance(A , A ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A , info=A , split=A , axis=A ) else: return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
24
0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.dirname(os.path.realpath(__A ) ) _UpperCAmelCase = os.path.join(__A , 'words.txt' ) _UpperCAmelCase = """""" with open(__A ) as f: _UpperCAmelCase = f.readline() _UpperCAmelCase = [word.strip('\"' ) for word in words.strip('\r\n' ).split(',' )] _UpperCAmelCase = [ word for word in [sum(ord(__A ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(__A ) if __name__ == "__main__": print(solution())
715
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class lowercase__ ( unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case ) return generator, ["Something to write", "Something else"] def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict: _UpperCAmelCase = generator('Something there' ) self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) _UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) _UpperCAmelCase = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case ) self.assertEqual( snake_case , [ [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], [{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}], ] , ) with self.assertRaises(snake_case ): generator(4 ) @require_torch def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , 
framework='pt' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] ) _UpperCAmelCase = 3 _UpperCAmelCase = generator( 'Something there' , num_return_sequences=snake_case , num_beams=snake_case , ) _UpperCAmelCase = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(snake_case , snake_case ) _UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case ) self.assertEqual( snake_case , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) _UpperCAmelCase = generator.model.config.eos_token_id _UpperCAmelCase = '<pad>' _UpperCAmelCase = generator( ['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , ) self.assertEqual( snake_case , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility _UpperCAmelCase = generator('Something there' , do_sample=snake_case ) self.assertEqual(snake_case , [{'generated_text': ''}] )
24
0
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] ) def UpperCAmelCase ( A : List[str] , A : Optional[int] , A : Optional[int] , A : str , ): '''simple docstring''' if index == len(_lowerCamelCase ): print(_lowerCamelCase ) return for i in range(len(_lowerCamelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) _UpperCAmelCase = True create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase ) current_sequence.pop() _UpperCAmelCase = False lowercase = [3, 1, 2, 4] generate_all_permutations(sequence) lowercase = ['''A''', '''B''', '''C'''] generate_all_permutations(sequence_a)
716
"""simple docstring""" def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): _UpperCAmelCase = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowercase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowercase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
24
0
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowercase = sys.version_info >= (3, 10) def UpperCAmelCase ( A : Optional[Any]=None , A : Optional[Any]=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_lowerCamelCase ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = field(default='''toto''', metadata={'''help''': '''help message'''} ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = None class lowercase__ ( _A ): '''simple docstring''' _UpperCAmelCase = '''titi''' _UpperCAmelCase = '''toto''' class lowercase__ ( _A ): '''simple docstring''' _UpperCAmelCase = '''titi''' _UpperCAmelCase = '''toto''' _UpperCAmelCase = 42 @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = '''toto''' def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = '''toto''' def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = None _UpperCAmelCase = field(default=_A, metadata={'''help''': '''help message'''} ) _UpperCAmelCase = None _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[] 
) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[1, 2, 3] ) _UpperCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) _UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = field() _UpperCAmelCase = field() _UpperCAmelCase = field() def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = field() _UpperCAmelCase = None _UpperCAmelCase = field(default='''toto''', metadata={'''help''': '''help message'''} ) _UpperCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = None @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = None _UpperCAmelCase = field(default=_A, metadata={'''help''': '''help message'''} ) _UpperCAmelCase = None _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[] ) class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__lowerCamelCase ).items() if k != "container"} _UpperCAmelCase = {k: v for k, v in vars(__lowerCamelCase ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , __lowerCamelCase ) and yy.get('choices' , __lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](__lowerCamelCase ) , yy['type'](__lowerCamelCase ) ) del xx["type"], yy["type"] 
self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument('--bar' , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument('--baz' , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument('--flag' , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs='?' ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] (_UpperCAmelCase ) = parser.parse_args_into_dataclasses(__lowerCamelCase , look_for_args_file=__lowerCamelCase ) self.assertFalse(example.flag ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=__lowerCamelCase ) expected.add_argument('--baz' , default='toto' , type=__lowerCamelCase , help='help message' ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs='?' ) expected.add_argument('--baz' , type=__lowerCamelCase , default=__lowerCamelCase , const=__lowerCamelCase , nargs='?' 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=__lowerCamelCase , dest='baz' ) expected.add_argument('--opt' , type=__lowerCamelCase , default=__lowerCamelCase ) _UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__lowerCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) _UpperCAmelCase = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) _UpperCAmelCase = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) _UpperCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) _UpperCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , baz=__lowerCamelCase , opt=__lowerCamelCase ) ) def lowerCamelCase_ ( self ) -> List[Any]: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] 
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCamelCase_ ( self ) -> Tuple: @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = '''toto''' _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _UpperCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _UpperCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__lowerCamelCase ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__lowerCamelCase ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__lowerCamelCase ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('--foo_int 1 
--bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(__lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=__lowerCamelCase , type=__lowerCamelCase ) expected.add_argument('--bar' , default=__lowerCamelCase , type=__lowerCamelCase , help='help message' ) expected.add_argument('--baz' , default=__lowerCamelCase , type=__lowerCamelCase ) expected.add_argument('--ces' , nargs='+' , default=[] , type=__lowerCamelCase ) expected.add_argument('--des' , nargs='+' , default=[] , type=__lowerCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__lowerCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__lowerCamelCase , Namespace(foo=__lowerCamelCase , bar=__lowerCamelCase , baz=__lowerCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(__lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument('--required_str' , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__lowerCamelCase , ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = 
HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=__lowerCamelCase , required=__lowerCamelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__lowerCamelCase , ) expected.add_argument('--opt' , type=__lowerCamelCase , default=__lowerCamelCase ) expected.add_argument('--baz' , default='toto' , type=__lowerCamelCase , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__lowerCamelCase ) self.argparsersEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } _UpperCAmelCase = parser.parse_dict(__lowerCamelCase )[0] _UpperCAmelCase = BasicExample(**__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__lowerCamelCase , parser.parse_dict , __lowerCamelCase , allow_extra_keys=__lowerCamelCase ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) _UpperCAmelCase = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__lowerCamelCase , 'temp_json' ) os.mkdir(__lowerCamelCase ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] _UpperCAmelCase = BasicExample(**__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) 
_UpperCAmelCase = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__lowerCamelCase , 'temp_yaml' ) os.mkdir(__lowerCamelCase ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] _UpperCAmelCase = BasicExample(**__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = HfArgumentParser(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase )
717
"""simple docstring""" import os lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00} def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while index < len(A ) - 1: _UpperCAmelCase = SYMBOLS[numerals[index]] _UpperCAmelCase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def UpperCAmelCase ( A : int ): '''simple docstring''' _UpperCAmelCase = '' _UpperCAmelCase = num // 1000 numerals += m_count * "M" num %= 1000 _UpperCAmelCase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 _UpperCAmelCase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def UpperCAmelCase ( A : str = "/p089_roman.txt" ): '''simple docstring''' _UpperCAmelCase = 0 with open(os.path.dirname(A ) + roman_numerals_filename ) as filea: _UpperCAmelCase = filea.readlines() for line in lines: _UpperCAmelCase = line.strip() _UpperCAmelCase = parse_roman_numerals(A ) _UpperCAmelCase = generate_roman_numerals(A ) savings += len(A ) - len(A ) return savings if __name__ == "__main__": print(F'''{solution() = }''')
24
0
"""simple docstring""" import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowercase = "\\n Text data.\n Second line of data." lowercase = "file" @pytest.fixture(scope='session' ) def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '''.zstd''') _UpperCAmelCase = bytes(a_ , 'utf-8' ) with zstd.open(a_ , 'wb' ) as f: f.write(a_ ) return path @pytest.fixture def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , a_ ) , 'w' ) as f: f.write(a_ ) return FILE_PATH @pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] ) def UpperCAmelCase ( A : str , A : Tuple , A : Dict , A : Optional[Any] , A : Tuple , A : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _UpperCAmelCase = input_paths[compression_format] _UpperCAmelCase = tmp_path / '''cache''' _UpperCAmelCase = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ ) _UpperCAmelCase = cached_path(a_ , download_config=a_ ) with open(a_ ) as f: _UpperCAmelCase = f.read() with open(a_ ) as f: _UpperCAmelCase = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted' , [True, False] ) @pytest.mark.parametrize('default_cache_dir' , [True, False] ) def UpperCAmelCase ( A : List[Any] , A : Optional[int] , A : List[Any] , A : Optional[int] , A : Tuple ): '''simple docstring''' _UpperCAmelCase = '''custom_cache''' _UpperCAmelCase = '''custom_extracted_dir''' _UpperCAmelCase = tmp_path / '''custom_extracted_path''' if default_extracted: _UpperCAmelCase = ('''downloads''' if default_cache_dir else 
custom_cache_dir, '''extracted''') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , a_ ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(a_ ) ) _UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _UpperCAmelCase = xz_file _UpperCAmelCase = ( DownloadConfig(extract_compressed_file=a_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ ) ) _UpperCAmelCase = cached_path(a_ , download_config=a_ ) assert Path(a_ ).parent.parts[-2:] == expected def UpperCAmelCase ( A : Any ): '''simple docstring''' _UpperCAmelCase = str(Path(a_ ).resolve() ) assert cached_path(a_ ) == text_file # relative path _UpperCAmelCase = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(a_ ) == text_file def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(a_ ): cached_path(a_ ) # relative path _UpperCAmelCase = '''./__missing_file__.txt''' with pytest.raises(a_ ): cached_path(a_ ) def UpperCAmelCase ( A : Optional[int] ): '''simple docstring''' _UpperCAmelCase = get_from_cache(f'tmp://{tmpfs_file}' ) with open(a_ ) as f: _UpperCAmelCase = f.read() assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE' , a_ ) def UpperCAmelCase ( ): '''simple docstring''' with pytest.raises(a_ ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , a_ ) def UpperCAmelCase ( A : Dict ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.html''' with pytest.raises(a_ ): http_get('https://huggingface.co' , temp_file=a_ ) with pytest.raises(a_ ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , a_ ) def UpperCAmelCase ( A : List[str] ): '''simple docstring''' _UpperCAmelCase = 
tmp_path_factory.mktemp('data' ) / '''file.html''' with pytest.raises(a_ ): ftp_get('ftp://huggingface.co' , temp_file=a_ ) with pytest.raises(a_ ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , a_ ) def UpperCAmelCase ( A : str ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.html''' with pytest.raises(a_ ): fsspec_get('s3://huggingface.co' , temp_file=a_ ) with pytest.raises(a_ ): fsspec_head('s3://huggingface.co' )
718
"""Tests for the framework-agnostic tensor helpers in ``transformers.utils``.

Each helper (``flatten_dict``, ``transpose``, ``reshape``, ``squeeze`` and
``expand_dims``) is validated against its NumPy reference behaviour and,
when the corresponding framework is installed, against PyTorch, TensorFlow
and JAX tensors as well.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class lowercase__(unittest.TestCase):
    """Cross-framework behaviour checks for the generic tensor utilities.

    NOTE: in the previous revision every method was named identically, so each
    definition shadowed the one before it and unittest discovered none of
    them; the methods now carry unique ``test_*`` names.
    """

    def test_flatten_dict(self):
        # A nested config-style mapping flattens to dot-joined keys.
        nested = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(nested), expected)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        # The torch path must agree with the NumPy path on the same data.
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
24
0
"""Pipeline tests for ``mask-generation`` (Segment Anything).

The slow integration tests hit the hub model ``facebook/sam-vit-huge`` and
compare mask outputs by a short content hash rather than by full pixel data.
"""
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so annotations resolve when vision extras are absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    """Return a short, stable fingerprint of an image's raw pixel bytes.

    md5 is fine here: the hash is used only to shorten test fixtures, not
    for anything security-sensitive.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    """Summarise a mask as a small ``{"hash", "shape"}`` dict to assert on."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class lowercase__(unittest.TestCase):
    """End-to-end checks for ``MaskGenerationPipeline``."""

    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance plus example inputs for the common harness."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Behaviour is covered by the slow integration tests below.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shorten each mask to a (hash, shape) summary so the expected list stays readable.
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        # pred_iou_thresh=1 keeps only the highest-confidence masks.
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shorten each mask to a (hash, shape) summary so the expected list stays readable.
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
            ],
        )
719
"""simple docstring""" import os def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' ) with open(A ) as file_hand: return str(sum(int(A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
24
0
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( 
broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, 
has_transformer_engine_layers
720
"""Lazy import structure for the RoBERTa model family.

Sub-modules are only imported when one of their attributes is first
accessed; which entries are registered depends on the installed optional
backends (tokenizers, PyTorch, TensorFlow, Flax).
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps sub-module name -> public names it exports. Optional-backend entries
# are ADDED below (the previous revision overwrote this mapping with each
# backend's list, and then referenced an undefined `_import_structure`).
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules so attribute access triggers the
    # lazy imports declared above (the previous revision assigned the
    # _LazyModule to a throwaway variable, so lazy loading never activated).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
24
0