code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Optional[int] =OpenAIGPTTokenizer __a : Tuple =OpenAIGPTTokenizerFast __a : Any =True __a : Union[str, Any] =False def __snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self , UpperCAmelCase_ ): return "lower newer", "lower newer" def __snake_case ( self ): lowerCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase = '''lower''' lowerCAmelCase = ['''low''', '''er</w>'''] lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = tokens + ['''<unk>'''] lowerCAmelCase = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ ) def 
__snake_case ( self , UpperCAmelCase_=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) # Simple input lowerCAmelCase = '''This is a simple input''' lowerCAmelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] lowerCAmelCase = ('''This is a simple input''', '''This is a pair''') lowerCAmelCase = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='''max_length''' , ) def __snake_case ( self ): pass @require_ftfy @require_spacy @require_tokenizers class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' pass
33
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. 
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
1
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets UpperCAmelCase_ ="""\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ UpperCAmelCase_ ="""\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ UpperCAmelCase_ =""" Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): '''simple docstring''' def __snake_case ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_=False ): if 
rouge_types is None: lowerCAmelCase = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] lowerCAmelCase = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase_ , use_stemmer=UpperCAmelCase_ ) if use_aggregator: lowerCAmelCase = scoring.BootstrapAggregator() else: lowerCAmelCase = [] for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = scorer.score(UpperCAmelCase_ , UpperCAmelCase_ ) if use_aggregator: aggregator.add_scores(UpperCAmelCase_ ) else: scores.append(UpperCAmelCase_ ) if use_aggregator: lowerCAmelCase = aggregator.aggregate() else: lowerCAmelCase = {} for key in scores[0]: lowerCAmelCase = [score[key] for score in scores] return result
33
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
1
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
33
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( _snake_case = 3 ): if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' ) lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' ) lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case ) lowerCAmelCase = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots lowerCAmelCase = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
33
1
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : List[str] =["""image_processor""", """tokenizer"""] __a : Optional[Any] ="""BridgeTowerImageProcessor""" __a : Optional[Any] =("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): lowerCAmelCase = self.tokenizer( text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel_values + pixel_mask lowerCAmelCase = self.image_processor( UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , **UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ 
) def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def __snake_case ( self ): lowerCAmelCase = self.tokenizer.model_input_names lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Any =1 @register_to_config def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ): lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowerCAmelCase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) lowerCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): lowerCAmelCase = std.unsqueeze(-1 ) lowerCAmelCase = -score / std # compute lowerCAmelCase = -1.0 / len(self.timesteps ) lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowerCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): lowerCAmelCase = beta_t.unsqueeze(-1 ) lowerCAmelCase = -0.5 * beta_t * x lowerCAmelCase = torch.sqrt(UpperCAmelCase_ ) lowerCAmelCase = drift - diffusion**2 * score lowerCAmelCase = x + drift * dt # add noise lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype ) lowerCAmelCase = x_mean + diffusion * 
math.sqrt(-dt ) * noise return x, x_mean def __len__( self ): return self.config.num_train_timesteps
33
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""vivit""" def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=32 , UpperCAmelCase_=[2, 16, 16] , UpperCAmelCase_=3 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu_fast" , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-0_6 , UpperCAmelCase_=True , **UpperCAmelCase_ , ): lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = image_size lowerCAmelCase = num_frames lowerCAmelCase = tubelet_size lowerCAmelCase = num_channels lowerCAmelCase = qkv_bias super().__init__(**UpperCAmelCase_ )
33
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __UpperCamelCase ( yaml.SafeLoader ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys] lowerCAmelCase = Counter(UpperCAmelCase_ ) lowerCAmelCase = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ): lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ ) self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ ) return mapping def UpperCAmelCase ( _snake_case ): lowerCAmelCase = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase = full_content[1:].index('''---''' ) + 1 lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_snake_case ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __snake_case ( cls , UpperCAmelCase_ ): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase_ ) else: return cls() def __snake_case ( self , UpperCAmelCase_ ): if path.exists(): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase = readme_file.read() else: lowerCAmelCase = None lowerCAmelCase = self._to_readme(UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file: 
readme_file.write(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ = None ): if readme_content is not None: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ ) lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def __snake_case ( cls , UpperCAmelCase_ ): lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase_ ) def __snake_case ( self ): return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' ) UpperCAmelCase_ ={ """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], 
"""visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") UpperCAmelCase_ =ap.parse_args() UpperCAmelCase_ =Path(args.readme_filepath) UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
33
1
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = inspect.getfile(accelerate.test_utils ) lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) lowerCAmelCase = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def __snake_case ( self ): print(F"""Found {torch.cuda.device_count()} devices.""" ) lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def __snake_case ( self ): print(F"""Found {torch.cuda.device_count()} devices.""" ) lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def __snake_case ( self ): lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def __snake_case ( self ): print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) lowerCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' 
): execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase_ =Accelerator() UpperCAmelCase_ =(accelerator.state.process_index + 2, 10) UpperCAmelCase_ =torch.randint(0, 10, shape).to(accelerator.device) UpperCAmelCase_ ="""""" UpperCAmelCase_ =accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCAmelCase_ =accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCAmelCase_ =accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
33
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for the video-classification task."""

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus example inputs (one local file, one remote URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example must yield top_k=2 (score, label) dicts."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """Deterministic check against a tiny random VideoMAE checkpoint."""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification",
            model=small_model,
            feature_extractor=small_feature_extractor,
            frame_sampling_rate=4,
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")

        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        # Batched input: each video gets its own list of predictions.
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF implementation to exercise yet.
        pass
33
1
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    """Tests that TvltProcessor correctly composes its image processor and feature extractor."""

    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """Round-trip through save_pretrained/from_pretrained preserves component types."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        """Audio passed through the processor matches the raw feature extractor output."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        """Images passed through the processor match the raw image processor output."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        """Joint audio+image call returns all four expected keys; empty call raises."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        """The processor's input names are the union of its two components' names."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
33
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the ONNX Stable Diffusion img2img pipeline with various schedulers."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic dummy inputs: seeded image tensor and numpy RNG generator."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against the full-size ONNX checkpoints."""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
33
1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model; stores hyperparameters and special token ids."""

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer (adds the global attention mask input)."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Flag the model so it takes the ONNX-friendly code paths during export.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
33
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Materialize a fine-pruned checkpoint by applying its learned masks to the weights.

    Reads ``pytorch_model.bin`` from ``args.model_name_or_path``, binarizes the
    ``*mask_scores`` tensors according to ``args.pruning_method``/``args.threshold``,
    multiplies each prunable weight by its mask, and saves the result to
    ``args.target_model_path`` (default: a sibling ``bertarized_<name>`` folder).
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Never pruned: copied verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # Mask tensors themselves are consumed, not kept.
                    continue
                prefix_ = name[:-6]  # strip trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch (l, r) then clamp to [0, 1].
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
33
1
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for the k-diffusion sampler backends."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        """SD v1-4 with the sample_euler k-diffusion sampler."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        """SD 2-1-base with the sample_euler k-diffusion sampler."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        """SD 2-1-base with sample_dpmpp_2m and Karras sigma schedule."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
33
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer: fastBPE subword segmentation with RoBERTa-style special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # Special tokens occupy the first four ids, as in the fairseq dictionary.
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        # Each merge line ends with a frequency count that we drop.
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """PhoBERT does not use token type ids; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # drop the trailing "</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each token."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and strip the "@@ " BPE continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the vocab and merges files into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq-style ``<token> <count>`` dictionary file (or open handle) into the vocab."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
33
1
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration for a TrajectoryTransformer model.

    Stores the hyperparameters used to instantiate the model; defaults
    reproduce the values below. Inherits serialization / hub behavior
    from `PretrainedConfig`.
    """

    model_type = "trajectory_transformer"
    # Cached past key/values are not part of the model output signature at inference.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the canonical HF attribute names onto this config's GPT-style names.
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        # The original obfuscated version assigned these to throwaway locals;
        # they must be instance attributes for PretrainedConfig serialization.
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
33
from __future__ import annotations from typing import Generic, TypeVar UpperCAmelCase_ =TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = self lowerCAmelCase = 0 class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # map from node name to the node object lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # create a new set with x as its member lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): # find the set x belongs to (with path-compression) lowerCAmelCase = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase = nodea else: lowerCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # merge 2 disjoint sets self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) ) class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): # add an edge with the given weight self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase = weight lowerCAmelCase = weight def __snake_case ( self ): lowerCAmelCase = [] lowerCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) 
edges.sort(key=lambda UpperCAmelCase_ : x[2] ) # creating the disjoint set lowerCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(UpperCAmelCase_ ) # MST generation lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index] index += 1 lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ ) return graph
33
1
import collections
import importlib.util
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend name(s) required by an `if not is_xxx_available()` line.

    Multiple backends are sorted and joined with "_and_"; returns None when the
    line is not a backend guard.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Read an `__init__.py` and return two dicts (backend -> list of object names).

    The first dict is built from the `_import_structure` half, the second from
    the `TYPE_CHECKING` half. Returns None for inits without `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure.
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure.
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend.
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects.
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise if any init's two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of top-level transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise if a submodule on disk is missing from the main init's _import_structure."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
33
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of `array` summing to `target`.

    Plain exponential recursion (no memoization); `n` is accepted for a
    signature consistent with the other variants but is not needed here.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as `combination_sum_iv`, memoized top-down with a dp array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up; `n` is the number of usable elements of `array`."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty combination
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
33
1
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over contiguous subarrays of `arr` (Kadane's algorithm).

    When `allow_empty_subarrays` is True the empty subarray (sum 0) is allowed,
    so an all-negative input yields 0 instead of its largest element.
    An empty `arr` returns 0.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart (at 0 if empty allowed, else at num).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
33
import torch
from diffusers import StableDiffusionPipeline

# Path (or hub id) of the fine-tuned DreamBooth checkpoint to load.
model_id = "path-to-your-trained-model"
# fp16 halves memory; requires a CUDA device. `torch.floataa` in the original
# obfuscated source is not a real dtype — torch.float16 is the intended one.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
33
1
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup  # original obfuscation said `bsa`, which does not exist
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon India search-results page for `product` into a DataFrame.

    Columns: title, link, current price, rating, MRP and percentage discount.
    Rows with missing fields fall back to placeholder values; network access
    is required. NOTE(review): scraping depends on Amazon's markup staying
    stable — selectors may need updating.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)

    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )

    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹" + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out nonsensical rows where the listed price exceeds the MRP.
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Declarative import map consumed by _LazyModule: module file -> public names.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only objects: must be registered under "modeling_jukebox" (the
    # obfuscated original dropped them into an unused global instead).
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for video classification."""

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a top-2 video-classification pipeline plus example inputs (local file + URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example must yield exactly two (score, label) dicts."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """Pin exact scores of a tiny random VideoMAE checkpoint (single and batched input)."""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification",
            model=small_model,
            feature_extractor=small_feature_extractor,
            frame_sampling_rate=4,
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # TF equivalent not implemented.
        pass
33
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', 
'''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: 
lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = 
{sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        # Serialize the id -> (text, title) passage mapping so the legacy index can load it.
        pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) )
        # Build a RAG config pointing at the legacy index written into the tmp dir.
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() ,
            index_name='''legacy''' ,
            index_path=self.tmpdirname ,
        )
        lowerCAmelCase = RagRetriever(
            UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    # NOTE(review): method names below are mangled to `__snake_case`; original names
    # (test_canonical_hf_index_retriever_retrieve, etc.) to be restored — TODO confirm
    # against upstream transformers tests/models/rag/test_retrieval_rag.py.
    def __snake_case ( self ):
        # Retrieve against the canonical HF index: one query of all-ones and one of
        # all-minus-ones; docs must be ranked by inner product with the query.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save_pretrained / from_pretrained for the canonical index;
        # dataset loading is patched out so no hub access happens.
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                lowerCAmelCase = self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase_ )
                lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
                self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
                lowerCAmelCase = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
                self.assertTrue(out is not None )

    def __snake_case ( self ):
        # Same retrieval checks as the canonical test, but with a custom in-memory HF index.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save/load for the custom in-memory HF index retriever.
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # Same retrieval checks, custom HF index loaded from disk this time.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save/load for the on-disk custom HF index retriever.
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # Legacy index exposes only text/title (no embeddings/id columns).
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save/load for the legacy index retriever.
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case ( self ):
        # Calling the retriever directly (postprocessing path): numpy outputs by default,
        # torch tensors when return_tensors="pt" is requested.
        import torch

        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
        lowerCAmelCase = retriever(
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            prefix=retriever.config.generator.prefix ,
            n_docs=UpperCAmelCase_ ,
            return_tensors='''pt''' ,
        )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case ( self ):
        # With a context-encoder tokenizer attached, the output dict gains two extra
        # keys holding the tokenized retrieved documents.
        lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ )
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
        self.assertEqual(
            len(UpperCAmelCase_ ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ )  # check for doc token related keys in dictionary.
33
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """
    Configuration class for a Data2Vec vision model.

    Fix over the mangled original: every constructor argument had been collapsed to
    the duplicate name ``UpperCAmelCase_`` (a syntax error) and every attribute
    assignment went to a throwaway local ``lowerCAmelCase`` instead of ``self``,
    so no configuration value was ever stored. Parameter names are restored from
    the right-hand-side identifiers still present in the original body
    (``hidden_size``, ``out_indices``, ``semantic_loss_ignore_index``, ...).
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],  # mutable default kept for backward compatibility
        pool_scales=[1, 2, 3, 6],   # mutable default kept for backward compatibility
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision models."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-4
33
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    """
    Configuration class for Switch Transformers models.

    Fix over the mangled original: constructor parameters all shared the duplicate
    name ``UpperCAmelCase_`` (a syntax error) and attribute assignments went to a
    throwaway local instead of ``self`` — yet the body *reads*
    ``self.num_sparse_encoder_layers``, ``self.num_layers``,
    ``self.feed_forward_proj`` etc., which were therefore never set. Names are
    restored from those reads and from the right-hand-side identifiers.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # Parse the activation spec ("relu", "gated-gelu", ...).
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
33
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output of the predictor step.

    Attributes:
        prev_sample: computed previous-timestep sample x_{t-1}.
        prev_sample_mean: the mean of x_{t-1} (without the diffusion noise term).
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Variance-exploding (VE) SDE predictor-corrector scheduler.

    Fix over the mangled original: attribute assignments had been collapsed to a
    throwaway local (so ``self.timesteps``, ``self.discrete_sigmas`` and
    ``self.init_noise_sigma`` were read but never written) and parameter names
    were duplicated, which is a syntax error. Names restored from the ``self.*``
    reads present in the original body.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """This scheduler needs no input scaling; return the sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Set the continuous timesteps, linearly spaced from 1 down to sampling_eps."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise schedule (sigmas) used by the VE SDE."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(
            torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)
        )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # sigma at the previous discrete index; zero at the first step.
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the reverse-time VE SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: Langevin-like correction based on the model score."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse clean samples to the noise level of `timesteps` (for training)."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
33
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. 
Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
1
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


# NOTE(review): class/method/local names are mangled (`__UpperCamelCase`,
# `__snake_case`, `lowerCAmelCase`); code kept token-identical, comments added.
class __UpperCamelCase ( __UpperCAmelCase ):
    '''simple docstring'''

    # Scheduler classes under test and default forward kwargs.
    __a : int =(UniPCMultistepScheduler,)
    __a : Dict =(("""num_inference_steps""", 2_5),)

    def __snake_case ( self , **UpperCAmelCase_ ):
        # Build a default UniPC scheduler config, overridable via kwargs.
        lowerCAmelCase = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }
        config.update(**UpperCAmelCase_ )
        return config

    def __snake_case ( self , UpperCAmelCase_=0 , **UpperCAmelCase_ ):
        # Save/restore a configured scheduler and check stepping from a given
        # time_step produces identical outputs before and after the round trip.
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , UpperCAmelCase_ )
        lowerCAmelCase = self.dummy_sample
        lowerCAmelCase = 0.1 * sample
        lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase_ )
            lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
            scheduler.set_timesteps(UpperCAmelCase_ )
            # copy over dummy past residuals
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase_ )
                lowerCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase_ )
                new_scheduler.set_timesteps(UpperCAmelCase_ )
                # copy over dummy past residuals
                lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowerCAmelCase , lowerCAmelCase = sample, sample
            for t in range(UpperCAmelCase_ , time_step + scheduler.config.solver_order + 1 ):
                lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
                lowerCAmelCase = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def __snake_case ( self , UpperCAmelCase_=0 , **UpperCAmelCase_ ):
        # Same round-trip check, but with a single step after restore.
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , UpperCAmelCase_ )
        lowerCAmelCase = self.dummy_sample
        lowerCAmelCase = 0.1 * sample
        lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config()
            lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
            scheduler.set_timesteps(UpperCAmelCase_ )
            # copy over dummy past residuals (must be after setting timesteps)
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase_ )
                lowerCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCAmelCase_ )
                # copy over dummy past residual (must be after setting timesteps)
                lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            lowerCAmelCase = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def __snake_case ( self , UpperCAmelCase_=None , **UpperCAmelCase_ ):
        # Run a full 10-step denoising loop with the dummy model and return the
        # final sample; builds a default scheduler if none is passed in.
        if scheduler is None:
            lowerCAmelCase = self.scheduler_classes[0]
            lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase_ )
            lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase_ )
        lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
        lowerCAmelCase = 10
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCAmelCase_ )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
        return sample

    def __snake_case ( self ):
        # Output shape of a step must match the input sample shape.
        lowerCAmelCase = dict(self.forward_default_kwargs )
        lowerCAmelCase = kwargs.pop('''num_inference_steps''' , UpperCAmelCase_ )
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config()
            lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
            lowerCAmelCase = self.dummy_sample
            lowerCAmelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCAmelCase_ , '''set_timesteps''' ):
                scheduler.set_timesteps(UpperCAmelCase_ )
            elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ , '''set_timesteps''' ):
                lowerCAmelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            lowerCAmelCase = scheduler.timesteps[5]
            lowerCAmelCase = scheduler.timesteps[6]
            lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __snake_case ( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowerCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
        lowerCAmelCase = self.full_loop(scheduler=UpperCAmelCase_ )
        lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase_ ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
        lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase = self.full_loop(scheduler=UpperCAmelCase_ )
        lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase_ ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3

    def __snake_case ( self ):
        # Sweep num_train_timesteps values.
        for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )

    def __snake_case ( self ):
        # Sweep thresholding configurations across orders / solver types.
        self.check_over_configs(thresholding=UpperCAmelCase_ )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=UpperCAmelCase_ ,
                            prediction_type=UpperCAmelCase_ ,
                            sample_max_value=UpperCAmelCase_ ,
                            solver_order=UpperCAmelCase_ ,
                            solver_type=UpperCAmelCase_ ,
                        )

    def __snake_case ( self ):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=UpperCAmelCase_ )

    def __snake_case ( self ):
        # Sweep solver types and orders; full loop must be NaN-free.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=UpperCAmelCase_ ,
                        solver_type=UpperCAmelCase_ ,
                        prediction_type=UpperCAmelCase_ ,
                    )
                    lowerCAmelCase = self.full_loop(
                        solver_order=UpperCAmelCase_ ,
                        solver_type=UpperCAmelCase_ ,
                        prediction_type=UpperCAmelCase_ ,
                    )
                    assert not torch.isnan(UpperCAmelCase_ ).any(), "Samples have nan numbers"

    def __snake_case ( self ):
        # Both lower_order_final settings must be accepted.
        self.check_over_configs(lower_order_final=UpperCAmelCase_ )
        self.check_over_configs(lower_order_final=UpperCAmelCase_ )

    def __snake_case ( self ):
        # Sweep inference step counts with a fixed time step.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=UpperCAmelCase_ , time_step=0 )

    def __snake_case ( self ):
        # Regression value for the default full loop.
        lowerCAmelCase = self.full_loop()
        lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase_ ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3

    def __snake_case ( self ):
        # Regression value for v_prediction.
        lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
        lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase_ ) )
        assert abs(result_mean.item() - 0.1014 ) < 1E-3

    def __snake_case ( self ):
        # fp16 inputs must stay fp16 through the full loop.
        lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0 )
        lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
        lowerCAmelCase = 10
        lowerCAmelCase = self.dummy_model()
        lowerCAmelCase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(UpperCAmelCase_ )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
        assert sample.dtype == torch.floataa

    def __snake_case ( self , **UpperCAmelCase_ ):
        # Using all train timesteps must give a unique timestep per inference step.
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase_ )
            lowerCAmelCase = scheduler_class(**UpperCAmelCase_ )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
33
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


# NOTE(review): class/attribute names are mangled; code kept token-identical.
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    # Pipeline under test plus the parameter sets the common tester should exercise.
    __a : Tuple =IFInpaintingSuperResolutionPipeline
    __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
    __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}

    def __snake_case ( self ):
        # Dummy super-resolution components provided by IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
        # Build deterministic dummy inputs: 16x16 low-res image plus 32x32
        # original and mask images, all seeded for reproducibility.
        if str(UpperCAmelCase_ ).startswith('''mps''' ):
            lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
        else:
            lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,
    )
    def __snake_case ( self ):
        # xformers attention must match the default attention closely.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __snake_case ( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def __snake_case ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __snake_case ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __snake_case ( self ):
        self._test_save_load_local()

    def __snake_case ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,
        )
33
1
class TrieNode:
    """A node of a trie; the root node represents the whole trie.

    Fix: the original's class and method names were all mangled to
    ``__UpperCamelCase`` / ``__snake_case`` while the bodies and callers
    reference ``TrieNode``, ``insert``, ``find`` etc., so nothing worked.
    """

    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False  # True when a stored word ends at this node

    def insert_many(self, words) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word) -> None:
        """Insert *word*, creating intermediate nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word) -> bool:
        """Return True iff *word* was previously inserted (exact match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word) -> None:
        """Remove *word* from the trie, pruning nodes that become unused."""

        def _delete(curr, word, index) -> bool:
            # Returns True when `curr` can be deleted by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node, word) -> None:
    """Print every word stored under *node*, each prefixed with *word*."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    """Exercise insert/find/delete; returns True when all checks pass."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg, passes) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Demonstrate the trie by running its self-test."""
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
33
# Lazy-import module init for the EfficientFormer model family.
# NOTE(review): assignment targets in this file are machine-mangled
# (`UpperCAmelCase_`); in the upstream original these populate
# `_import_structure[...]`, which `_LazyModule` consumes at the bottom.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Configuration is always importable, regardless of optional backends.
UpperCAmelCase_ ={
    """configuration_efficientformer""": [
        """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EfficientFormerConfig""",
    ]
}

# Image processor: requires the vision extra (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ =["""EfficientFormerImageProcessor"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ =[
        """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EfficientFormerForImageClassification""",
        """EfficientFormerForImageClassificationWithTeacher""",
        """EfficientFormerModel""",
        """EfficientFormerPreTrainedModel""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ =[
        """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFEfficientFormerForImageClassification""",
        """TFEfficientFormerForImageClassificationWithTeacher""",
        """TFEfficientFormerModel""",
        """TFEfficientFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports eagerly.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy so heavy backends
    # are only imported on first attribute access.
    import sys

    UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
from copy import deepcopy


class BinaryIndexedTree:
    """Fenwick tree (binary indexed tree) over a 0-indexed array.

    Slot 0 of ``tree`` stores ``arr[0]`` directly; slots 1..size-1 form the
    classic Fenwick structure, so all public indices are plain 0-based.

    Fix: in the original every method was mangled to ``__snake_case`` (the
    later definitions shadowed the earlier ones) while the bodies call
    ``self.init``, ``self.next_``, ``self.prev``, ``self.add``, ``self.get``,
    ``self.prefix`` and ``self.query`` — restoring those names makes the
    class usable again.
    """

    def __init__(self, arr=None, size=None):
        """Build from an existing array, or zero-filled with *size* slots.

        Raises ValueError when neither *arr* nor *size* is given.
        """
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """Initialize the tree from *arr* in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Reconstruct and return the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next node whose range includes `index` (adds the lowest set bit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous node in a prefix-sum walk (removes the lowest set bit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add *value* to ``arr[index]`` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set ``arr[index]`` to *value* in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return ``sum(arr[0:right])`` (exclusive bound) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return ``sum(arr[left:right])`` in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return ``arr[index]`` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index ``i`` with ``sum(arr[0:i + 1]) <= value``.

        Returns -1 when even ``arr[0]`` exceeds *value*. Assumes all
        elements are non-negative (standard Fenwick rank-query precondition).
        """
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
# Packaged JSON/JSON-Lines dataset builder for the `datasets` library.
# NOTE(review): identifiers are machine-mangled (`__UpperCamelCase`,
# `__snake_case`, `lowerCAmelCase`, `UpperCAmelCase_`); structure mirrors the
# upstream `datasets/packaged_modules/json/json.py`.
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    '''simple docstring'''

    # features / encoding / encoding_errors / field, then deprecated
    # use_threads and block_size, chunksize, newlines_in_values.
    __a : Optional[datasets.Features] =None
    __a : str ="utf-8"
    __a : Optional[str] =None
    __a : Optional[str] =None
    __a : bool =True  # deprecated
    __a : Optional[int] =None  # deprecated
    __a : int =1_0 << 2_0  # 10MB
    __a : Optional[bool] =None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    __a : str =JsonConfig

    def __snake_case ( self ):
        # _info: validate deprecated/removed config knobs and expose features.
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            lowerCAmelCase = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    def __snake_case ( self , UpperCAmelCase_ ):
        # _split_generators: resolve data_files into per-split file iterators.
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
            # Single split: everything goes into TRAIN.
            lowerCAmelCase = data_files
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
        return splits

    def __snake_case ( self , UpperCAmelCase_ ):
        # _cast_table: align a parsed Arrow table with the requested features.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
                lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
        return pa_table

    def __snake_case ( self , UpperCAmelCase_ ):
        # _generate_tables: yield (key, pa.Table) pairs from each input file.
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                # We keep only the field we are interested in
                lowerCAmelCase = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase_ , (list, tuple) ):
                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                else:
                    lowerCAmelCase = dataset
                lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                yield file_idx, self._cast_table(UpperCAmelCase_ )
            # If the file has one json object per line
            else:
                with open(UpperCAmelCase_ , '''rb''' ) as f:
                    lowerCAmelCase = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
                    lowerCAmelCase = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        lowerCAmelCase = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
                        try:
                            # Retry parsing with a doubled block_size until a JSON
                            # object no longer straddles a block boundary.
                            while True:
                                try:
                                    lowerCAmelCase = paj.read_json(
                                        io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ )
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
                                        and "straddling" not in str(UpperCAmelCase_ )
                                        or block_size > len(UpperCAmelCase_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."""
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fall back to parsing the whole file with the json module.
                            try:
                                with open(
                                    UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors
                                ) as f:
                                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):  # list is the only sequence type supported in JSON
                                try:
                                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                                    lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(UpperCAmelCase_ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
                        batch_idx += 1
33
1
# Configuration class for Switch Transformers (sparse mixture-of-experts T5).
# NOTE(review): identifiers are machine-mangled; parameter names can be read
# from the assignments in `__init__` (vocab_size, d_model, d_kv, ...).
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ =logging.get_logger(__name__)

UpperCAmelCase_ ={
    """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}


class __UpperCamelCase ( __UpperCAmelCase ):
    '''simple docstring'''

    # model_type, keys ignored at inference, attribute aliases.
    __a : Any ="""switch_transformers"""
    __a : Union[str, Any] =["""past_key_values"""]
    __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
        # Core transformer dimensions.
        lowerCAmelCase = vocab_size
        lowerCAmelCase = d_model
        lowerCAmelCase = d_kv
        lowerCAmelCase = d_ff
        lowerCAmelCase = num_sparse_encoder_layers
        lowerCAmelCase = num_layers
        lowerCAmelCase = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        lowerCAmelCase = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
        else:
            lowerCAmelCase = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            lowerCAmelCase = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        # Mixture-of-experts router settings.
        lowerCAmelCase = num_heads
        lowerCAmelCase = num_experts
        lowerCAmelCase = expert_capacity
        lowerCAmelCase = router_bias
        lowerCAmelCase = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        lowerCAmelCase = router_dtype
        lowerCAmelCase = router_ignore_padding_tokens
        # Relative position bias and regularization.
        lowerCAmelCase = relative_attention_num_buckets
        lowerCAmelCase = relative_attention_max_distance
        lowerCAmelCase = dropout_rate
        lowerCAmelCase = layer_norm_epsilon
        lowerCAmelCase = initializer_factor
        lowerCAmelCase = feed_forward_proj
        lowerCAmelCase = use_cache
        lowerCAmelCase = add_router_probs
        lowerCAmelCase = router_z_loss_coef
        lowerCAmelCase = router_aux_loss_coef
        # Parse activation spec of the form `{ACT_FN}` or `gated-{ACT_FN}`.
        lowerCAmelCase = self.feed_forward_proj.split('''-''' )
        lowerCAmelCase = act_info[-1]
        lowerCAmelCase = act_info[0] == '''gated'''
        if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\''''
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowerCAmelCase = '''gelu_new'''
        super().__init__(
            pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
# Configuration for the MaskFormer Swin backbone.
# NOTE(review): identifiers are machine-mangled; parameter names can be read
# from the assignments in `__init__` (image_size, patch_size, ...).
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


UpperCAmelCase_ =logging.get_logger(__name__)


class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
    '''simple docstring'''

    # model_type and attribute aliases.
    __a : Optional[Any] ="""maskformer-swin"""
    __a : Optional[int] ={
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
        super().__init__(**UpperCAmelCase_ )
        # Patch embedding and stage layout.
        lowerCAmelCase = image_size
        lowerCAmelCase = patch_size
        lowerCAmelCase = num_channels
        lowerCAmelCase = embed_dim
        lowerCAmelCase = depths
        lowerCAmelCase = len(UpperCAmelCase_ )
        lowerCAmelCase = num_heads
        lowerCAmelCase = window_size
        lowerCAmelCase = mlp_ratio
        lowerCAmelCase = qkv_bias
        # Regularization / activation.
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = drop_path_rate
        lowerCAmelCase = hidden_act
        lowerCAmelCase = use_absolute_embeddings
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
        lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
        lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
            out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
1
def UpperCAmelCase ( _snake_case ): lowerCAmelCase = [1] lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0, 0, 0 lowerCAmelCase = ugly_nums[ia] * 2 lowerCAmelCase = ugly_nums[ia] * 3 lowerCAmelCase = ugly_nums[ia] * 5 for _ in range(1 , _snake_case ): lowerCAmelCase = min(_snake_case , _snake_case , _snake_case ) ugly_nums.append(_snake_case ) if next_num == next_a: ia += 1 lowerCAmelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 lowerCAmelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 lowerCAmelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(F'''{ugly_numbers(200) = }''')
33
from collections.abc import Sequence def UpperCAmelCase ( _snake_case , _snake_case = False ): if not arr: return 0 lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' ) lowerCAmelCase = 0.0 for num in arr: lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num ) lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
33
1
def UpperCAmelCase ( ): lowerCAmelCase = 0 for i in range(1 , 1001 ): total += i**i return str(_snake_case )[-10:] if __name__ == "__main__": print(solution())
33
# Tests for BertJapaneseTokenizer and its word/subword tokenizers
# (MeCab, Sudachi, Juman++, wordpiece, character).
# NOTE(review): identifiers are machine-mangled (`__UpperCamelCase`,
# `__snake_case`, `lowerCAmelCase`, `UpperCAmelCase_`); the structure mirrors
# the upstream transformers BertJapanese tokenization test module.
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    __a : Any =BertJapaneseTokenizer
    __a : Optional[int] =False
    __a : int =True

    def __snake_case ( self ):
        # Write a small word/wordpiece vocabulary into the temp dir.
        super().setUp()
        lowerCAmelCase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''こんにちは''',
            '''こん''',
            '''にちは''',
            '''ばんは''',
            '''##こん''',
            '''##にちは''',
            '''##ばんは''',
            '''世界''',
            '''##世界''',
            '''、''',
            '''##、''',
            '''。''',
            '''##。''',
        ]
        lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __snake_case ( self , UpperCAmelCase_ ):
        lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
        lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text

    def __snake_case ( self , UpperCAmelCase_ ):
        # Encode then decode a round-trip sample for the given tokenizer.
        lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
        return text, ids

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        # Default word tokenizer + wordpiece.
        lowerCAmelCase = self.tokenizer_class(self.vocab_file )
        lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def __snake_case ( self ):
        # MeCab word tokenizer, including pickle round-trip.
        lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
        self.assertIsNotNone(UpperCAmelCase_ )
        lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
        lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
        self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(UpperCAmelCase_ , '''wb''' ) as handle:
            pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
        with open(UpperCAmelCase_ , '''rb''' ) as handle:
            lowerCAmelCase = pickle.load(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
        self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )

    def __snake_case ( self ):
        lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )

    def __snake_case ( self ):
        # unidic_lite is optional; skip silently when it is not installed.
        try:
            lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )

    def __snake_case ( self ):
        # unidic is optional; skip silently when it is not installed.
        try:
            lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )

    def __snake_case ( self ):
        lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )

    def __snake_case ( self ):
        # Custom dictionary path may not exist on the host; skip in that case.
        try:
            lowerCAmelCase = MecabTokenizer(
                do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )

    def __snake_case ( self ):
        lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )

    @require_sudachi
    def __snake_case ( self ):
        # Sudachi word tokenizer, including pickle round-trip.
        lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
        self.assertIsNotNone(UpperCAmelCase_ )
        lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
        lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
        self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(UpperCAmelCase_ , '''wb''' ) as handle:
            pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
        with open(UpperCAmelCase_ , '''rb''' ) as handle:
            lowerCAmelCase = pickle.load(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
        self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )

    @require_sudachi
    def __snake_case ( self ):
        lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )

    @require_sudachi
    def __snake_case ( self ):
        # Sudachi split mode A: shortest units.
        lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )

    @require_sudachi
    def __snake_case ( self ):
        # Sudachi split mode B: intermediate units.
        lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )

    @require_sudachi
    def __snake_case ( self ):
        # Sudachi split mode C: longest units.
        lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )

    @require_sudachi
    def __snake_case ( self ):
        lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )

    @require_sudachi
    def __snake_case ( self ):
        lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )

    @require_sudachi
    def __snake_case ( self ):
        lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )

    @require_jumanpp
    def __snake_case ( self ):
        # Juman++ word tokenizer, including pickle round-trip.
        lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
        self.assertIsNotNone(UpperCAmelCase_ )
        lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
        lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
        self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(UpperCAmelCase_ , '''wb''' ) as handle:
            pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
        with open(UpperCAmelCase_ , '''rb''' ) as handle:
            lowerCAmelCase = pickle.load(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
        self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )

    @require_jumanpp
    def __snake_case ( self ):
        lowerCAmelCase = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )

    @require_jumanpp
    def __snake_case ( self ):
        lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )

    @require_jumanpp
    def __snake_case ( self ):
        lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )

    @require_jumanpp
    def __snake_case ( self ):
        lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が  \n 発売された 。  ''' ) ,
            ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )

    @require_jumanpp
    def __snake_case ( self ):
        lowerCAmelCase = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) ,
            ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )

    def __snake_case ( self ):
        # WordpieceTokenizer in isolation with a hand-built vocab.
        lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        lowerCAmelCase = {}
        for i, token in enumerate(UpperCAmelCase_ ):
            lowerCAmelCase = i
        lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
        self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
        self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )

    def __snake_case ( self ):
        # Sentencepiece-backed subword tokenizer from a hub checkpoint.
        lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
        lowerCAmelCase = tokenizer.subword_tokenizer
        lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
        self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
        lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
        self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )

    def __snake_case ( self ):
        # Special-token layout: [CLS] A [SEP] and [CLS] A [SEP] B [SEP].
        lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
        lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    # Character-level subword tokenizer variant.
    __a : Union[str, Any] =BertJapaneseTokenizer
    __a : Optional[int] =False

    def __snake_case ( self ):
        # Write a single-character vocabulary into the temp dir.
        super().setUp()
        lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __snake_case ( self , **UpperCAmelCase_ ):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )

    def __snake_case ( self , UpperCAmelCase_ ):
        lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
        lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
        lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
        self.assertListEqual(
            UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def __snake_case ( self ):
        # CharacterTokenizer in isolation with a hand-built vocab.
        lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowerCAmelCase = {}
        for i, token in enumerate(UpperCAmelCase_ ):
            lowerCAmelCase = i
        lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
        self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )

    def __snake_case ( self ):
        # Special-token layout for the character checkpoint.
        lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
        lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def __snake_case ( self ):
        # AutoTokenizer should resolve the checkpoint to BertJapaneseTokenizer.
        lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
        lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class __UpperCamelCase(unittest.TestCase):
    """Checks the warning emitted by `from_pretrained` when the checkpoint's
    tokenizer class differs from the class used to load it."""

    def __snake_case(self):
        # Fix: the original assigned the checkpoint name to a local but then
        # passed an undefined name (`UpperCAmelCase_`) to `from_pretrained`;
        # the assigned local is now actually used.
        # Japanese checkpoint loaded via the generic BertTokenizer must warn.
        lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''', level='''WARNING''') as cm:
            BertTokenizer.from_pretrained(lowerCAmelCase)
        self.assertTrue(
            cm.records[0].message.startswith(
                '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.'''
            )
        )
        # Converse direction: BertJapaneseTokenizer on an English checkpoint.
        lowerCAmelCase = '''bert-base-cased'''
        with self.assertLogs('''transformers''', level='''WARNING''') as cm:
            BertJapaneseTokenizer.from_pretrained(lowerCAmelCase)
        self.assertTrue(
            cm.records[0].message.startswith(
                '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.'''
            )
        )
33
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


UpperCAmelCase_ = logging.get_logger(__name__)


class __UpperCamelCase(__UpperCAmelCase):
    """Deprecated alias kept for backward compatibility; use FlavaImageProcessor."""

    def __init__(self, *args, **kwargs):
        # Fixes vs. original:
        #  * `*UpperCAmelCase_, **UpperCAmelCase_` used one name for both
        #    parameters, which is a SyntaxError; renamed to *args/**kwargs
        #    (invisible to callers).
        #  * the args tuple was passed as the warning category, which raises
        #    TypeError inside `warnings.warn`; a deprecation notice uses
        #    FutureWarning.
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
33
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


# NOTE(review): these three module "constants" share one name, so only the
# last assignment (the commit sha) survives at runtime — presumably they were
# meant to be three distinct constants (repo id, cache dir, sha); confirm
# against the upstream test file.
UpperCAmelCase_ = """hf-internal-testing/tiny-random-bert"""
UpperCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCAmelCase_ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""


class __UpperCamelCase(unittest.TestCase):
    """Tests for the Hub file-resolution utilities (cached_file & friends).

    NOTE(review): every method is named `__snake_case`, so later definitions
    shadow earlier ones and only the last method survives on the class;
    likewise locals are assigned to `lowerCAmelCase` but undefined
    `UpperCAmelCase_` names are read — these look like artifacts of an
    automated rename and should be reconciled with the upstream test file.
    """

    def __snake_case(self):
        # Download a file from the tiny test repo and inspect the cache layout.
        lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(UpperCAmelCase_))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_, UpperCAmelCase_)))
        # The main ref records the snapshot (commit) the file resolved to.
        with open(os.path.join(UpperCAmelCase_, '''refs''', '''main''')) as f:
            lowerCAmelCase = f.read()
        self.assertEqual(UpperCAmelCase_, os.path.join(UpperCAmelCase_, '''snapshots''', UpperCAmelCase_, UpperCAmelCase_))
        self.assertTrue(os.path.isfile(UpperCAmelCase_))
        # File is cached at the same place the second time.
        lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_)
        self.assertEqual(UpperCAmelCase_, UpperCAmelCase_)
        # Using a specific revision to test the full commit hash.
        lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_, revision='''9b8c223''')
        self.assertEqual(UpperCAmelCase_, os.path.join(UpperCAmelCase_, '''snapshots''', UpperCAmelCase_, UpperCAmelCase_))

    def __snake_case(self):
        # Invalid repo id, invalid revision and missing filename each raise
        # with a distinctive message.
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid model identifier'''):
            lowerCAmelCase = cached_file('''tiny-random-bert''', UpperCAmelCase_)
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid git identifier'''):
            lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_, revision='''aaaa''')
        with self.assertRaisesRegex(UpperCAmelCase_, '''does not appear to have a file named'''):
            lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''')

    def __snake_case(self):
        # A missing file is recorded under `.no_exist` in the cache, and later
        # lookups can return None instead of raising.
        with self.assertRaisesRegex(UpperCAmelCase_, '''does not appear to have a file named'''):
            lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''')
        with open(os.path.join(UpperCAmelCase_, '''refs''', '''main''')) as f:
            lowerCAmelCase = f.read()
        self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_, '''.no_exist''', UpperCAmelCase_, '''conf''')))
        lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''', _raise_exceptions_for_missing_entries=UpperCAmelCase_)
        self.assertIsNone(UpperCAmelCase_)
        lowerCAmelCase = cached_file(
            UpperCAmelCase_, '''conf''', local_files_only=UpperCAmelCase_, _raise_exceptions_for_missing_entries=UpperCAmelCase_
        )
        self.assertIsNone(UpperCAmelCase_)
        # Build a fake 500 response to simulate a server outage.
        lowerCAmelCase = mock.Mock()
        lowerCAmelCase = 5_00
        lowerCAmelCase = {}
        lowerCAmelCase = HTTPError
        lowerCAmelCase = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''', return_value=UpperCAmelCase_) as mock_head:
            lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''', _raise_exceptions_for_connection_errors=UpperCAmelCase_)
            self.assertIsNone(UpperCAmelCase_)
            # This check we did call the fake head request
            mock_head.assert_called()

    def __snake_case(self):
        # has_file reports whether a framework weight file exists on the Hub.
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''', UpperCAmelCase_))
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', UpperCAmelCase_))
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', UpperCAmelCase_))

    def __snake_case(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''', '''ahah.txt'''))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid model identifier'''):
            get_file_from_repo('''bert-base-case''', UpperCAmelCase_)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid git identifier'''):
            get_file_from_repo('''bert-base-cased''', UpperCAmelCase_, revision='''ahaha''')
        lowerCAmelCase = get_file_from_repo('''bert-base-cased''', UpperCAmelCase_)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        lowerCAmelCase = json.loads(open(UpperCAmelCase_, '''r''').read())
        self.assertEqual(config['''hidden_size'''], 7_68)

    def __snake_case(self):
        # Local directories are searched directly, without touching the Hub.
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCAmelCase = Path(UpperCAmelCase_) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(UpperCAmelCase_, '''a.txt'''), str(UpperCAmelCase_))
            self.assertIsNone(get_file_from_repo(UpperCAmelCase_, '''b.txt'''))
33
1
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __UpperCamelCase(__UpperCAmelCase):
    """Combines a BLIP image processor and a tokenizer into a single processor.

    NOTE(review): the original declared the three class attributes under one
    shadowed name (`__a`, with undefined annotations) and every signature used
    one duplicated parameter name (a SyntaxError). Names below are restored
    from what the method bodies actually read (self.image_processor /
    self.tokenizer and the keyword arguments forwarded to the tokenizer);
    confirm against the upstream processor definition.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""

    def __init__(self, image_processor, tokenizer):
        # NOTE(review): reconstructed from a dead local (`= False`) in the
        # original — token type ids are not used by this model family.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare model inputs from images and/or text.

        Returns tokenizer outputs when only text is given, image-processor
        outputs when only images are given, and their union when both are.

        Raises:
            ValueError: if neither images nor text is provided.
        """
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        # De-duplicated union of the two components' input names (order kept).
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
33
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class __UpperCamelCase(__UpperCAmelCase):
    """Reader that builds a Dataset (or a streaming dataset) from a PySpark DataFrame."""

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        # NOTE(review): the original declared every parameter with the same
        # name (a SyntaxError) and dropped the attribute assignments into dead
        # locals; parameter/attribute names are restored from the keyword
        # arguments forwarded below and the attributes read in the read method.
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def __snake_case(self):
        """Materialize the dataset: streaming if requested, otherwise
        download/prepare (honoring the cache flag) and load the split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
33
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Fix: the original built each optional export list in a throwaway variable
# but then passed an undefined `_import_structure` to `_LazyModule` (NameError
# at import time). The structure is rebuilt here, keyed by the submodule names
# imported in the TYPE_CHECKING branch below.
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}

# Tokenizer exports depend on optional backends (sentencepiece / tokenizers).
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["""XLNetTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["""XLNetTokenizerFast"""]

# Model exports depend on the installed framework (PyTorch / TensorFlow).
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLNetForMultipleChoice""",
        """XLNetForQuestionAnswering""",
        """XLNetForQuestionAnsweringSimple""",
        """XLNetForSequenceClassification""",
        """XLNetForTokenClassification""",
        """XLNetLMHeadModel""",
        """XLNetModel""",
        """XLNetPreTrainedModel""",
        """load_tf_weights_in_xlnet""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLNetForMultipleChoice""",
        """TFXLNetForQuestionAnsweringSimple""",
        """TFXLNetForSequenceClassification""",
        """TFXLNetForTokenClassification""",
        """TFXLNetLMHeadModel""",
        """TFXLNetMainLayer""",
        """TFXLNetModel""",
        """TFXLNetPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the table above.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Fix: install the lazy proxy into sys.modules (the original bound it to a
    # throwaway variable, leaving the eager module empty despite importing sys).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def UpperCAmelCase(_snake_case=3):
    """Build an n-qubit quantum Fourier transform circuit and return the
    measurement counts from a 10000-shot qasm simulation.

    Raises:
        TypeError: if the qubit count is a string.
        ValueError: if it is <= 0, not an exact integer, or > 10.
    """
    # Fixes vs. original: `isinstance(x, x)` raised TypeError for every input;
    # the qubit count was passed where qubit indices / the circuit were
    # expected in cp(), swap(), execute() and get_counts(); and the body read
    # an undefined `number_of_qubits` name instead of the parameter.
    if isinstance(_snake_case, str):
        raise TypeError('''number of qubits must be a integer.''')
    if _snake_case <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(_snake_case) != _snake_case:
        raise ValueError('''number of qubits must be exact integer.''')
    if _snake_case > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    lowerCAmelCase = QuantumRegister(_snake_case, '''qr''')
    lowerCAmelCase_cr = ClassicalRegister(_snake_case, '''cr''')
    quantum_circuit = QuantumCircuit(lowerCAmelCase, lowerCAmelCase_cr)

    counter = _snake_case
    for i in range(_snake_case):
        # Hadamard on the current pivot qubit (highest index first).
        quantum_circuit.h(_snake_case - i - 1)
        counter -= 1
        for j in range(counter):
            # Controlled phase rotation between qubit j and the pivot.
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order to obtain the standard QFT output ordering.
    for k in range(_snake_case // 2):
        quantum_circuit.swap(k, _snake_case - k - 1)

    # measure all the qubits
    quantum_circuit.measure(lowerCAmelCase, lowerCAmelCase_cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    # Fix: the original f-string called `quantum_fourier_transform`, which is
    # not defined in this module.
    print(
        F'''Total count for quantum fourier transform state is: \
{UpperCAmelCase(3)}'''
    )
33
1
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


# Fixes vs. original: `range(_snake_case, i + 1)` read an undefined module-level
# name (NameError at collection time); every test function was named
# `UpperCAmelCase` (so pytest never collected them and later definitions
# shadowed earlier ones); and each signature duplicated one parameter name
# (a SyntaxError). Parameter names are restored from the parametrize ids.
@pytest.mark.parametrize(
    '''kwargs, expected''',
    [
        ({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
        ({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10)]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
        ({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1)]),
        ({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits num_shards into at most max_num_jobs ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    '''gen_kwargs, max_num_jobs, expected''',
    [
        ({'''foo''': 0}, 10, [{'''foo''': 0}]),
        ({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
        ({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
        ({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
        ({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs splits list-valued gen_kwargs across jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    '''gen_kwargs, expected''',
    [
        ({'''foo''': 0}, 1),
        ({'''shards''': [0]}, 1),
        ({'''shards''': [0, 1, 2, 3]}, 4),
        ({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
        ({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
        ({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is inferred from list values; conflicting lists raise."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class __UpperCamelCase(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler.

    NOTE(review): the original used one duplicated placeholder name for every
    parameter (a SyntaxError), bound results to dead locals instead of
    instance attributes (so `self.timesteps` could never be set and the step
    method always failed), and gave both methods the same name (the second
    shadowed the first). Names are restored from the config keys and the
    attributes the bodies read; confirm against the upstream scheduler.
    """

    # Solver order.
    __a = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # register_to_config stores the arguments on self.config; only the
        # mutable state is initialized here.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the descending continuous timestep grid from 1 to sampling_eps."""
        # Fix: the original bound this to a local, leaving self.timesteps unset.
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """One reverse-SDE predictor step; returns (noisy sample, mean sample)."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        # Broadcast std across the trailing sample dimensions.
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
33
1
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. 
Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class __UpperCamelCase(yaml.SafeLoader):
    """SafeLoader variant intended to reject YAML mappings with duplicate keys
    (plain SafeLoader silently keeps the last occurrence)."""

    def __snake_case(self, UpperCAmelCase_):
        # NOTE(review): the body reads `node`, `keys`, `counter` and
        # `duplicate_keys`, none of which match the names assigned here — as
        # written these are unresolved; reconcile with the upstream loader.
        lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable as keys, so they are compared as tuples.
        lowerCAmelCase = [tuple(UpperCAmelCase_) if isinstance(UpperCAmelCase_, UpperCAmelCase_) else key for key in keys]
        lowerCAmelCase = Counter(UpperCAmelCase_)
        lowerCAmelCase = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""")

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_=False):
        # Construct the mapping with the default SafeLoader, then run the
        # duplicate-key check on the raw node.
        lowerCAmelCase = super().construct_mapping(UpperCAmelCase_, deep=UpperCAmelCase_)
        self._check_no_duplicates_on_constructed_node(UpperCAmelCase_)
        return mapping


def UpperCAmelCase(_snake_case):
    """Split README text into (yaml front matter, remaining body).

    Returns (None, full_text) when there is no '---'-delimited block.
    NOTE(review): `readme_content`, `full_content`, `sep_idx` and `yamlblock`
    do not match the assigned local names — unresolved as written.
    """
    lowerCAmelCase = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the line right after the closing '---' of the front matter.
        lowerCAmelCase = full_content[1:].index('''---''') + 1
        lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(_snake_case)


class __UpperCamelCase(__UpperCAmelCase):
    """Dict-like dataset metadata read from / written to a README's YAML block.

    NOTE(review): every method below is named `__snake_case`, so only the last
    definition survives on the class; the bodies also read names
    (`_split_yaml_from_readme`, `_NoDuplicateSafeLoader`, `_FIELDS_WITH_DASHES`,
    `path`, `yaml_string`, ...) that are never defined under those names here.
    Reconcile with the upstream module before relying on this class.
    """

    # Fields serialized with dashes instead of underscores in YAML.
    __a: Any = {"""train_eval_index"""}  # train-eval-index in the YAML metadata

    @classmethod
    def __snake_case(cls, UpperCAmelCase_):
        # Parse the YAML front matter of a README file (if any) into metadata.
        with open(UpperCAmelCase_, encoding='''utf-8''') as readme_file:
            lowerCAmelCase, lowerCAmelCase = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(UpperCAmelCase_)
        else:
            return cls()

    def __snake_case(self, UpperCAmelCase_):
        # Rewrite the given README path with this metadata as front matter,
        # keeping the existing body if the file already exists.
        if path.exists():
            with open(UpperCAmelCase_, encoding='''utf-8''') as readme_file:
                lowerCAmelCase = readme_file.read()
        else:
            lowerCAmelCase = None
        lowerCAmelCase = self._to_readme(UpperCAmelCase_)
        with open(UpperCAmelCase_, '''w''', encoding='''utf-8''') as readme_file:
            readme_file.write(UpperCAmelCase_)

    def __snake_case(self, UpperCAmelCase_=None):
        # Build the full README text: '---' YAML block + (existing) body.
        if readme_content is not None:
            lowerCAmelCase, lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_)
            lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
        else:
            lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n'''
        return full_content

    @classmethod
    def __snake_case(cls, UpperCAmelCase_):
        # Parse a YAML string, mapping dashed YAML keys to underscored fields.
        lowerCAmelCase = yaml.load(UpperCAmelCase_, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        lowerCAmelCase = {
            (key.replace('''-''', '''_''') if key.replace('''-''', '''_''') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**UpperCAmelCase_)

    def __snake_case(self):
        # Serialize to YAML, mapping underscored fields back to dashed keys.
        return yaml.safe_dump(
            {
                (key.replace('''_''', '''-''') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=UpperCAmelCase_,
            allow_unicode=UpperCAmelCase_,
            encoding='''utf-8''',
        ).decode('''utf-8''')


# Known task-tag keys; each maps to its (empty) subtag list.
UpperCAmelCase_ = {
    """image-classification""": [],
    """translation""": [],
    """image-segmentation""": [],
    """fill-mask""": [],
    """automatic-speech-recognition""": [],
    """token-classification""": [],
    """sentence-similarity""": [],
    """audio-classification""": [],
    """question-answering""": [],
    """summarization""": [],
    """zero-shot-classification""": [],
    """table-to-text""": [],
    """feature-extraction""": [],
    """other""": [],
    """multiple-choice""": [],
    """text-classification""": [],
    """text-to-image""": [],
    """text2text-generation""": [],
    """zero-shot-image-classification""": [],
    """tabular-classification""": [],
    """tabular-regression""": [],
    """image-to-image""": [],
    """tabular-to-text""": [],
    """unconditional-image-generation""": [],
    """text-retrieval""": [],
    """text-to-speech""": [],
    """object-detection""": [],
    """audio-to-audio""": [],
    """text-generation""": [],
    """conversational""": [],
    """table-question-answering""": [],
    """visual-question-answering""": [],
    """image-to-text""": [],
    """reinforcement-learning""": [],
    """voice-activity-detection""": [],
    """time-series-forecasting""": [],
    """document-question-answering""": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    # NOTE(review): the script reads `ap`, `args`, `readme_filepath`,
    # `DatasetMetadata` and `dataset_metadata`, but every assignment targets
    # `UpperCAmelCase_` — unresolved as written; reconcile with upstream.
    UpperCAmelCase_ = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
    ap.add_argument("""readme_filepath""")
    UpperCAmelCase_ = ap.parse_args()
    UpperCAmelCase_ = Path(args.readme_filepath)
    UpperCAmelCase_ = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
33
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ="""trocr""" __a : List[Any] =["""past_key_values"""] __a : Dict ={ """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self , UpperCAmelCase_=5_02_65 , UpperCAmelCase_=10_24 , UpperCAmelCase_=12 , UpperCAmelCase_=16 , UpperCAmelCase_=40_96 , UpperCAmelCase_="gelu" , UpperCAmelCase_=5_12 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=0.0 , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=0 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = decoder_layers lowerCAmelCase = decoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = activation_function lowerCAmelCase = max_position_embeddings lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = init_std lowerCAmelCase = decoder_layerdrop lowerCAmelCase = use_cache lowerCAmelCase = scale_embedding lowerCAmelCase = use_learned_position_embeddings lowerCAmelCase = layernorm_embedding super().__init__( pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
# NOTE(review): transformers video-classification pipeline test file. Obfuscation
# replaced every local binding with `lowerCAmelCase`, while later statements read
# the original names (`video_classifier`, `examples`, `video_file_path`, ...);
# the code cannot run as written — flagged, left byte-compatible.
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # mapping of model classes the pipeline test harness iterates over
    __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
        # Build a pipeline + example inputs (one local file, one URL).
        lowerCAmelCase = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
        lowerCAmelCase = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
        # Each example should yield top_k=2 (score, label) dicts of the right types.
        for example in examples:
            lowerCAmelCase = video_classifier(UpperCAmelCase_ )
            self.assertEqual(
                UpperCAmelCase_ ,
                [
                    {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
                    {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
                ] , )

    @require_torch
    def __snake_case ( self ):
        # End-to-end check against a tiny random VideoMAE checkpoint with pinned scores.
        lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        lowerCAmelCase = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        lowerCAmelCase = pipeline(
            '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
        lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase_ , decimals=4 ) ,
            [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )

        # batched call: same pinned scores expected per input
        lowerCAmelCase = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(UpperCAmelCase_ , decimals=4 ) ,
            [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ] , )

    @require_tf
    def __snake_case ( self ):
        # TF variant intentionally not implemented.
        pass
33
1
import torch from diffusers import StableDiffusionPipeline UpperCAmelCase_ ="""path-to-your-trained-model""" UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") UpperCAmelCase_ ="""A photo of sks dog in a bucket""" UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
33
# NOTE(review): diffusers ONNX img2img pipeline test file. Local bindings were
# mangled to `lowerCAmelCase` but read back as `pipe`, `image`, `inputs`,
# `image_slice`, `expected_slice`, etc. — code cannot run as written. Also, two
# prompt string literals were split across the flattening boundary; rejoined to
# their evident single-line form.
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImgaImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    # tiny hub checkpoint used by all fast tests below
    __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    def __snake_case ( self , UpperCAmelCase_=0 ):
        # Deterministic dummy inputs; `seed` controls both the image and the RNG.
        lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) )
        lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ )
        lowerCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def __snake_case ( self ):
        # default scheduler: pin a 3x3 corner slice of the output image
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def __snake_case ( self ):
        # PNDM scheduler variant
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __snake_case ( self ):
        # LMS scheduler variant
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        # warmup pass to apply optimizations
        lowerCAmelCase = pipe(**self.get_dummy_inputs() )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __snake_case ( self ):
        # Euler scheduler variant
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __snake_case ( self ):
        # Euler-Ancestral scheduler variant (same pinned slice as Euler above)
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def __snake_case ( self ):
        # DPM-Solver multistep scheduler variant
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1


@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    @property
    def __snake_case ( self ):
        # ONNX Runtime CUDA provider config used by the nightly tests
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __snake_case ( self ):
        lowerCAmelCase = ort.SessionOptions()
        lowerCAmelCase = False
        return options

    def __snake_case ( self ):
        # full-size img2img against CompVis/stable-diffusion-v1-4 (ONNX export)
        lowerCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' ,
            revision='''onnx''' ,
            safety_checker=UpperCAmelCase_ ,
            feature_extractor=UpperCAmelCase_ ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase = np.random.RandomState(0 )
        lowerCAmelCase = pipe(
            prompt=UpperCAmelCase_ ,
            image=UpperCAmelCase_ ,
            strength=0.75 ,
            guidance_scale=7.5 ,
            num_inference_steps=10 ,
            generator=UpperCAmelCase_ ,
            output_type='''np''' ,
        )
        lowerCAmelCase = output.images
        lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def __snake_case ( self ):
        # same flow with an explicit LMS scheduler on runwayml/stable-diffusion-v1-5
        lowerCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase = init_image.resize((7_68, 5_12) )
        lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' ,
            revision='''onnx''' ,
            scheduler=UpperCAmelCase_ ,
            safety_checker=UpperCAmelCase_ ,
            feature_extractor=UpperCAmelCase_ ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase = np.random.RandomState(0 )
        lowerCAmelCase = pipe(
            prompt=UpperCAmelCase_ ,
            image=UpperCAmelCase_ ,
            strength=0.75 ,
            guidance_scale=7.5 ,
            num_inference_steps=20 ,
            generator=UpperCAmelCase_ ,
            output_type='''np''' ,
        )
        lowerCAmelCase = output.images
        lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
1
# NOTE(review): transformers TF MBart test file. Obfuscation replaced all local
# bindings with `lowerCAmelCase` and all parameter names with `UpperCAmelCase_`
# (duplicate parameter names are a SyntaxError); reads use the original names.
# Left byte-compatible; flagged, not fixed.
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class __UpperCamelCase :
    '''simple docstring'''

    # config class + per-test config overrides + hidden activation
    __a : List[Any] =MBartConfig
    __a : List[Any] ={}
    __a : str ="""gelu"""

    def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=99 , UpperCAmelCase_=32 , UpperCAmelCase_=2 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=2 , UpperCAmelCase_=1 , UpperCAmelCase_=0 , ):
        # tiny-model hyperparameters for fast tests
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = eos_token_id
        lowerCAmelCase = pad_token_id
        lowerCAmelCase = bos_token_id

    def __snake_case ( self ):
        # Build random input ids (EOS appended) and a matching config + inputs dict.
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowerCAmelCase = prepare_mbart_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        return config, inputs_dict

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
        # Decoder-only forward pass with cache enabled (checks past_key_values path).
        lowerCAmelCase = TFMBartModel(config=UpperCAmelCase_ ).get_decoder()
        lowerCAmelCase = inputs_dict['''input_ids''']
        lowerCAmelCase = input_ids[:1, :]
        lowerCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        lowerCAmelCase = inputs_dict['''head_mask''']
        lowerCAmelCase = 1
        # first forward pass
        lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
        lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
        lowerCAmelCase = past_key_values[1]


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , ):
    # Fill in default attention/head masks for an MBart forward call.
    if attention_mask is None:
        lowerCAmelCase = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # first decoder position always attended; rest masked where pad
        lowerCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    __a : Tuple =(TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    __a : Dict =(TFMBartForConditionalGeneration,) if is_tf_available() else ()
    __a : List[Any] =(
        {
            """conversational""": TFMBartForConditionalGeneration,
            """feature-extraction""": TFMBartModel,
            """summarization""": TFMBartForConditionalGeneration,
            """text2text-generation""": TFMBartForConditionalGeneration,
            """translation""": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __a : Optional[Any] =True
    __a : int =False
    __a : Optional[Any] =False

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
        # Skip every pipeline test except feature-extraction.
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def __snake_case ( self ):
        lowerCAmelCase = TFMBartModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ )

    def __snake_case ( self ):
        self.config_tester.run_common_tests()

    def __snake_case ( self ):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )


@require_sentencepiece
@require_tokenizers
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # en->ro translation fixtures for the slow integration test
    __a : Tuple =[
        """ UN Chief Says There Is No Military Solution in Syria""",
    ]
    __a : Any =[
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
    ]
    __a : Optional[Any] ="""facebook/mbart-large-en-ro"""

    @cached_property
    def __snake_case ( self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def __snake_case ( self ):
        lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def __snake_case ( self , **UpperCAmelCase_ ):
        lowerCAmelCase = self.translate_src_text(**UpperCAmelCase_ )
        self.assertListEqual(self.expected_text , UpperCAmelCase_ )

    def __snake_case ( self , **UpperCAmelCase_ ):
        # tokenize -> beam-search generate -> decode
        lowerCAmelCase = self.tokenizer(self.src_text , **UpperCAmelCase_ , return_tensors='''tf''' )
        lowerCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        lowerCAmelCase = self.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
        return generated_words

    @slow
    def __snake_case ( self ):
        self._assert_generated_batch_equal_expected()
33
# NOTE(review): movement-pruning "bertarize" conversion script. Local bindings
# were mangled to `lowerCAmelCase` while reads use the original names
# (`pruning_method`, `model`, `mask`, `target_model_path`, `parser`, ...), and
# the entry function's name was mangled although `__main__` calls `main(args)`.
# Left byte-compatible; flagged, not fixed. One f-string split across the
# flattening boundary was rejoined to its evident single-line form.
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def UpperCAmelCase ( _snake_case ):
    # Materialize a fine-pruned checkpoint into a dense "bertarized" one:
    # copy non-prunable tensors, apply the chosen binarizer mask to the rest.
    lowerCAmelCase = args.pruning_method
    lowerCAmelCase = args.threshold
    lowerCAmelCase = args.model_name_or_path.rstrip('''/''' )
    lowerCAmelCase = args.target_model_path

    print(F"""Load fine-pruned model from {model_name_or_path}""" )
    lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) )
    lowerCAmelCase = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # never pruned: copy through
            lowerCAmelCase = tensor
            print(F"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            lowerCAmelCase = tensor
            print(F"""Copied layer {name}""" )
        elif "bias" in name:
            lowerCAmelCase = tensor
            print(F"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case )
                lowerCAmelCase = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # strip the trailing "weight"/"kernel" suffix to locate mask scores
                lowerCAmelCase = name[:-6]
                lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
                lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case )
                lowerCAmelCase = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                lowerCAmelCase = name[:-6]
                lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
                lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case )
                lowerCAmelCase = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                lowerCAmelCase = name[:-6]
                lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
                # hard-concrete stretch interval (l, r) = (-0.1, 1.1)
                lowerCAmelCase , lowerCAmelCase = -0.1, 1.1
                lowerCAmelCase = torch.sigmoid(_snake_case )
                lowerCAmelCase = s * (r - l) + l
                lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
                lowerCAmelCase = tensor * mask
                print(F"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )

    if target_model_path is None:
        lowerCAmelCase = os.path.join(
            os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" )

    if not os.path.isdir(_snake_case ):
        shutil.copytree(_snake_case , _snake_case )
        print(F"""\nCreated folder {target_model_path}""" )

    torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )


if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        """--pruning_method""",
        choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
        type=str,
        required=True,
        help=(
            """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
            """ sigmoied_threshold = Soft movement pruning)"""
        ),
    )
    parser.add_argument(
        """--threshold""",
        type=float,
        required=False,
        help=(
            """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
            """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
            """Not needed for `l0`"""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        required=True,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    parser.add_argument(
        """--target_model_path""",
        default=None,
        type=str,
        required=False,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    UpperCAmelCase_ = parser.parse_args()

    main(args)
33
1
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( _snake_case = 3 ): if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' ) lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' ) lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case ) lowerCAmelCase = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots lowerCAmelCase = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
33
# NOTE(review): PhoBERT (Vietnamese BPE) tokenizer file. Local bindings were
# mangled to `lowerCAmelCase` while reads use the original names (`pairs`,
# `word`, `merges`, `split_tokens`, ...), and one `super()` call passes the
# same keyword twice (SyntaxError). Left byte-compatible; flagged, not fixed.
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)

# expected on-disk filenames for the two vocab artifacts
UpperCAmelCase_ = {
    """vocab_file""": """vocab.txt""",
    """merges_file""": """bpe.codes""",
}

UpperCAmelCase_ = {
    """vocab_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
    },
    """merges_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
    },
}

UpperCAmelCase_ = {
    """vinai/phobert-base""": 256,
    """vinai/phobert-large""": 256,
}


def UpperCAmelCase ( _snake_case ):
    # Return the set of adjacent symbol pairs in a word (BPE candidate merges).
    lowerCAmelCase = set()
    lowerCAmelCase = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCAmelCase = char
    lowerCAmelCase = set(_snake_case )
    return pairs


class __UpperCamelCase ( __UpperCAmelCase ):
    '''simple docstring'''

    __a : Union[str, Any] =VOCAB_FILES_NAMES
    __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
    __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ):
        super().__init__(
            bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
        lowerCAmelCase = vocab_file
        lowerCAmelCase = merges_file
        # special token ids are fixed: <s>=0, <pad>=1, </s>=2, <unk>=3
        lowerCAmelCase = {}
        lowerCAmelCase = 0
        lowerCAmelCase = 1
        lowerCAmelCase = 2
        lowerCAmelCase = 3
        self.add_from_file(UpperCAmelCase_ )
        lowerCAmelCase = {v: k for k, v in self.encoder.items()}
        with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
            lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1]
        # drop the trailing frequency column of each merge rule
        lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
        lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
        lowerCAmelCase = {}

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
        # <s> A </s> for single sequences, <s> A </s></s> B </s> for pairs.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        lowerCAmelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            # NOTE(review): duplicate keyword `token_ids_a=` below is a SyntaxError
            # introduced by the obfuscation (originally token_ids_0/token_ids_1).
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
        return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
        # PhoBERT does not use token types: all zeros.
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def __snake_case ( self ):
        return len(self.encoder )

    def __snake_case ( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def __snake_case ( self , UpperCAmelCase_ ):
        # Standard greedy BPE: repeatedly merge the lowest-ranked adjacent pair.
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase = tuple(UpperCAmelCase_ )
        lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        lowerCAmelCase = get_pairs(UpperCAmelCase_ )
        if not pairs:
            return token
        while True:
            lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase , lowerCAmelCase = bigram
            lowerCAmelCase = []
            lowerCAmelCase = 0
            while i < len(UpperCAmelCase_ ):
                try:
                    lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase = j
                if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase = tuple(UpperCAmelCase_ )
            lowerCAmelCase = new_word
            if len(UpperCAmelCase_ ) == 1:
                break
            else:
                lowerCAmelCase = get_pairs(UpperCAmelCase_ )
        # join sub-words with the "@@ " continuation marker, drop trailing </w>
        lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ )
        lowerCAmelCase = word[:-4]
        lowerCAmelCase = word
        return word

    def __snake_case ( self , UpperCAmelCase_ ):
        # Whitespace-tokenize, then BPE each token.
        lowerCAmelCase = []
        lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ )
        for token in words:
            split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) )
        return split_tokens

    def __snake_case ( self , UpperCAmelCase_ ):
        # token -> id, falling back to the <unk> id
        return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )

    def __snake_case ( self , UpperCAmelCase_ ):
        # id -> token, falling back to <unk>
        return self.decoder.get(UpperCAmelCase_ , self.unk_token )

    def __snake_case ( self , UpperCAmelCase_ ):
        # Undo BPE: join tokens and remove the "@@ " continuation markers.
        lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
        # Copy the vocab and merges files into `save_directory` unless identical paths.
        if not os.path.isdir(UpperCAmelCase_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase = os.path.join(
            UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase = os.path.join(
            UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
            copyfile(self.vocab_file , UpperCAmelCase_ )
        if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ):
            copyfile(self.merges_file , UpperCAmelCase_ )
        return out_vocab_file, out_merge_file

    def __snake_case ( self , UpperCAmelCase_ ):
        # Load a fairseq-style "<token> <count>" dictionary file (path or handle).
        if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
            try:
                with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(UpperCAmelCase_ )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
            return
        lowerCAmelCase = f.readlines()
        for lineTmp in lines:
            lowerCAmelCase = lineTmp.strip()
            lowerCAmelCase = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            lowerCAmelCase = line[:idx]
            lowerCAmelCase = len(self.encoder )
33
1
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs ``input_list[low:mid]`` and ``input_list[mid:high+1]``.

    The merged run is written back into ``input_list[low:high+1]`` in place and
    the whole list is returned for convenience.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Stable merge: take from the left run on ties (<=).
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Write the merged run plus any leftover tail back into the slice.
    # The anonymized source had lost this slice assignment (it assigned to a
    # throwaway local), which made the function a no-op on input_list.
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort; returns a new sorted list.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    >>> iter_merge_sort([])
    []
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)  # do not mutate the caller's list

    # Merge adjacent runs of doubling width p = 2, 4, 8, ...
    p = 2
    while p <= len(input_list):
        # Low, high and middle index for each run of the current pass.
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # Final merge of the last two (possibly unequal) parts.
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
33
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """Node of a disjoint-set (union-find) forest."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # each node starts as its own set representative
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set forest with union-by-rank and path compression."""

    def __init__(self) -> None:
        # map from node data to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the representative of the set containing ``data`` (with path compression)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        """Attach the lower-rank root under the higher-rank root (union by rank)."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the two disjoint sets containing ``data1`` and ``data2``."""
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with a Kruskal minimum-spanning-tree builder."""

    def __init__(self) -> None:
        # connections: map from a node to its neighbours (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Add ``node`` ONLY if it is not already present in the graph."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge with the given weight (nodes are created on demand)."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return the minimum spanning tree as a new GraphUndirectedWeighted.

        Assumes the graph is connected; otherwise edge exhaustion raises IndexError.
        """
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # Sort edges by ascending weight.  The anonymized source had
        # ``key=lambda UpperCAmelCase_: x[2]`` — an undefined-name bug.
        edges.sort(key=lambda edge: edge[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the lightest edge joining two components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
33
1
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    """Derive a SwinConfig from a timm model name such as ``swin_tiny_patch4_window7_224``."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        # any other size suffix falls through to the "large" preset
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # JSON keys are strings; the config expects int class ids.
        # (The anonymized source had ``int(_snake_case)`` here — an undefined name.)
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm state-dict key onto the corresponding HF Swin key."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Convert a timm Swin state dict in place, splitting fused qkv projections.

    Mask-related buffers are dropped; all other keys are renamed via rename_key.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # timm stores q, k and v stacked along dim 0; split them into the
            # three separate HF projections.
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert a pretrained timm Swin checkpoint to HF format and save it.

    Verifies the conversion by comparing logits on a COCO sample image.
    """
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Conversion sanity check: both models must agree on the sample image.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
33
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of ``array`` that sum to ``target``.

    Naive recursion (exponential time).  ``n`` (= len(array)) is kept for
    signature parity with the other implementations.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # one way: the empty remainder
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized top-down on a dp array (-1 marks "not computed").

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up DP in O(n * target).

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # base case: one way to make 0

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
33
1
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wavaveca.test_feature_extraction_wavaveca import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
    from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput


if is_torch_available():
    from transformers import WavaVecaForCTC


@require_pyctcdecode
class __UpperCamelCase(unittest.TestCase):
    """Test suite for a Wav2Vec2 processor with a language model (tokenizer +
    feature extractor + pyctcdecode beam-search decoder).

    NOTE(review): this file went through an identifier-anonymization step —
    every assignment targets ``lowerCAmelCase`` and most call arguments became
    ``UpperCAmelCase_`` — so the code is NOT runnable as-is.  The *reads*
    (``processor``, ``decoder``, ``tokenizer``, ...) still show the intended
    names; comments below describe only the visible intent.
    """

    # Writes a toy vocab and feature-extractor config into a temp dir and
    # records the hub repo name of the test decoder.
    def __snake_case(self):
        lowerCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        lowerCAmelCase = dict(zip(UpperCAmelCase_, range(len(UpperCAmelCase_))))
        lowerCAmelCase = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        lowerCAmelCase = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 1_60_00,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        lowerCAmelCase = tempfile.mkdtemp()
        lowerCAmelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        # NOTE(review): second argument was anonymized — presumably FEATURE_EXTRACTOR_NAME.
        lowerCAmelCase = os.path.join(self.tmpdirname, UpperCAmelCase_)
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(UpperCAmelCase_) + '''\n''')
        with open(self.feature_extraction_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(UpperCAmelCase_) + '''\n''')

        # load decoder from hub
        lowerCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder'''

    # Tokenizer factory: base kwargs map plus per-call overrides.
    def __snake_case(self, **UpperCAmelCase_):
        lowerCAmelCase = self.add_kwargs_tokens_map.copy()
        kwargs.update(UpperCAmelCase_)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase_)

    # Feature-extractor factory.
    def __snake_case(self, **UpperCAmelCase_):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **UpperCAmelCase_)

    # Decoder factory (downloaded from the hub).
    def __snake_case(self, **UpperCAmelCase_):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **UpperCAmelCase_)

    # tearDown: remove the temp dir created above.
    def __snake_case(self):
        shutil.rmtree(self.tmpdirname)

    # Round-trip save_pretrained/from_pretrained must preserve tokenizer,
    # feature extractor and decoder state.
    def __snake_case(self):
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        processor.save_pretrained(self.tmpdirname)
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, UpperCAmelCase_)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, UpperCAmelCase_)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, UpperCAmelCase_)

    # from_pretrained must apply LM hyper-parameter overrides.
    def __snake_case(self):
        lowerCAmelCase = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    # Tokens missing from the decoder alphabet must raise at construction.
    def __snake_case(self):
        lowerCAmelCase = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''])
        with self.assertRaisesRegex(UpperCAmelCase_, '''include'''):
            WavaVecaProcessorWithLM(
                tokenizer=UpperCAmelCase_, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    # Processor audio path must match the bare feature extractor.
    def __snake_case(self):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        lowerCAmelCase = floats_list((3, 10_00))
        lowerCAmelCase = feature_extractor(UpperCAmelCase_, return_tensors='''np''')
        lowerCAmelCase = processor(UpperCAmelCase_, return_tensors='''np''')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    # Processor text path must match the bare tokenizer.
    def __snake_case(self):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        lowerCAmelCase = '''This is a test string'''
        lowerCAmelCase = processor(text=UpperCAmelCase_)
        lowerCAmelCase = tokenizer(UpperCAmelCase_)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    # Deterministic random logits fixture.
    # NOTE(review): anonymization collided the two parameter names (originally
    # ``shape=(2, 10, 16)`` and ``seed=77``) — duplicate parameters are a
    # SyntaxError; restore distinct names before running.
    def __snake_case(self, UpperCAmelCase_=(2, 10, 16), UpperCAmelCase_=77):
        np.random.seed(UpperCAmelCase_)
        return np.random.rand(*UpperCAmelCase_)

    # Single-sample decode must agree with pyctcdecode's decode_beams.
    def __snake_case(self):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        lowerCAmelCase = self._get_dummy_logits(shape=(10, 16), seed=13)
        lowerCAmelCase = processor.decode(UpperCAmelCase_)
        lowerCAmelCase = decoder.decode_beams(UpperCAmelCase_)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual('''</s> <s> </s>''', decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    # Batch decode, with and without a multiprocessing pool.
    @parameterized.expand([[None], ['''fork'''], ['''spawn''']])
    def __snake_case(self, UpperCAmelCase_):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        lowerCAmelCase = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            lowerCAmelCase = processor.batch_decode(UpperCAmelCase_)
        else:
            with get_context(UpperCAmelCase_).Pool() as pool:
                lowerCAmelCase = processor.batch_decode(UpperCAmelCase_, UpperCAmelCase_)
        lowerCAmelCase = list(UpperCAmelCase_)
        with get_context('''fork''').Pool() as p:
            lowerCAmelCase = decoder.decode_beams_batch(UpperCAmelCase_, UpperCAmelCase_)
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(UpperCAmelCase_, decoded_processor.text)
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text)
        self.assertListEqual(UpperCAmelCase_, decoded_processor.logit_score)
        self.assertListEqual(UpperCAmelCase_, decoded_processor.lm_score)

    # Beam-search kwargs (beam_width, prune/min logp) must be forwarded.
    def __snake_case(self):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = 15
        lowerCAmelCase = -20.0
        lowerCAmelCase = -4.0
        lowerCAmelCase = processor.batch_decode(
            UpperCAmelCase_,
            beam_width=UpperCAmelCase_,
            beam_prune_logp=UpperCAmelCase_,
            token_min_logp=UpperCAmelCase_,
        )
        lowerCAmelCase = decoded_processor_out.text
        lowerCAmelCase = list(UpperCAmelCase_)
        with get_context('''fork''').Pool() as pool:
            lowerCAmelCase = decoder.decode_beams_batch(
                UpperCAmelCase_,
                UpperCAmelCase_,
                beam_width=UpperCAmelCase_,
                beam_prune_logp=UpperCAmelCase_,
                token_min_logp=UpperCAmelCase_,
            )
        lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
        lowerCAmelCase = [d[0][2] for d in decoded_decoder_out]
        lowerCAmelCase = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_)
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], UpperCAmelCase_)
        self.assertTrue(np.array_equal(UpperCAmelCase_, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], UpperCAmelCase_, atol=1E-3))
        self.assertTrue(np.array_equal(UpperCAmelCase_, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], UpperCAmelCase_, atol=1E-3))

    # LM hyper-parameters (alpha/beta/unk offset/boundary) must be forwarded
    # and stored on the decoder's language model.
    def __snake_case(self):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = 2.0
        lowerCAmelCase = 5.0
        lowerCAmelCase = -20.0
        lowerCAmelCase = True
        lowerCAmelCase = processor.batch_decode(
            UpperCAmelCase_,
            alpha=UpperCAmelCase_,
            beta=UpperCAmelCase_,
            unk_score_offset=UpperCAmelCase_,
            lm_score_boundary=UpperCAmelCase_,
        )
        lowerCAmelCase = decoded_processor_out.text
        lowerCAmelCase = list(UpperCAmelCase_)
        decoder.reset_params(
            alpha=UpperCAmelCase_,
            beta=UpperCAmelCase_,
            unk_score_offset=UpperCAmelCase_,
            lm_score_boundary=UpperCAmelCase_,
        )
        with get_context('''fork''').Pool() as pool:
            lowerCAmelCase = decoder.decode_beams_batch(
                UpperCAmelCase_,
                UpperCAmelCase_,
            )
        lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_)
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], UpperCAmelCase_)
        lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, UpperCAmelCase_)

    # Only decoder-relevant files must be downloaded from the hub.
    def __snake_case(self):
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
        lowerCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''')).parent.parent.absolute()
        lowerCAmelCase = os.listdir(UpperCAmelCase_)
        lowerCAmelCase = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_)

    # Loading from a local snapshot must see the same decoder files as the hub.
    def __snake_case(self):
        lowerCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''')
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(UpperCAmelCase_)
        lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
        lowerCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''')).parent.parent.absolute()
        lowerCAmelCase = os.listdir(UpperCAmelCase_)
        lowerCAmelCase = os.listdir(UpperCAmelCase_)
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(UpperCAmelCase_, UpperCAmelCase_)

    # AutoProcessor and WavaVecaProcessorWithLM must behave identically.
    def __snake_case(self):
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        lowerCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''')
        lowerCAmelCase = floats_list((3, 10_00))
        lowerCAmelCase = processor_wavaveca(UpperCAmelCase_, return_tensors='''np''')
        lowerCAmelCase = processor_auto(UpperCAmelCase_, return_tensors='''np''')
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2)
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = processor_wavaveca.batch_decode(UpperCAmelCase_)
        lowerCAmelCase = processor_auto.batch_decode(UpperCAmelCase_)
        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)

    # model_input_names must be delegated to the feature extractor.
    def __snake_case(self):
        lowerCAmelCase = self.get_feature_extractor()
        lowerCAmelCase = self.get_tokenizer()
        lowerCAmelCase = self.get_decoder()
        lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_, feature_extractor=UpperCAmelCase_, decoder=UpperCAmelCase_)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg='''`processor` and `feature_extractor` model input names do not match''',
        )

    # Helper: extract one field (e.g. "word", "start_offset") from a list of
    # offset dicts.
    @staticmethod
    def __snake_case(UpperCAmelCase_, UpperCAmelCase_):
        lowerCAmelCase = [d[key] for d in offsets]
        return retrieved_list

    # Single-sample decode with word offsets.
    def __snake_case(self):
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        lowerCAmelCase = self._get_dummy_logits()[0]
        lowerCAmelCase = processor.decode(UpperCAmelCase_, output_word_offsets=UpperCAmelCase_)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue('''text''' in outputs)
        self.assertTrue('''word_offsets''' in outputs)
        self.assertTrue(isinstance(UpperCAmelCase_, UpperCAmelCase_))
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''')), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word'''), ['''<s>''', '''<s>''', '''</s>'''])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset'''), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset'''), [1, 3, 5])

    # Batch decode with word offsets.
    def __snake_case(self):
        lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        lowerCAmelCase = self._get_dummy_logits()
        lowerCAmelCase = processor.batch_decode(UpperCAmelCase_, output_word_offsets=UpperCAmelCase_)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue('''text''' in outputs)
        self.assertTrue('''word_offsets''' in outputs)
        self.assertTrue(isinstance(UpperCAmelCase_, UpperCAmelCase_))
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(UpperCAmelCase_, '''word''')) for o in outputs['''word_offsets''']], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word'''), ['''<s>''', '''<s>''', '''</s>'''])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset'''), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset'''), [1, 3, 5])

    # End-to-end integration test against a real checkpoint and dataset:
    # word offsets converted to times must match reference values.
    @slow
    @require_torch
    @require_torchaudio
    def __snake_case(self):
        import torch

        lowerCAmelCase = load_dataset('''common_voice''', '''en''', split='''train''', streaming=UpperCAmelCase_)
        lowerCAmelCase = ds.cast_column('''audio''', datasets.Audio(sampling_rate=1_60_00))
        lowerCAmelCase = iter(UpperCAmelCase_)
        lowerCAmelCase = next(UpperCAmelCase_)
        lowerCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''')
        lowerCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''')
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        lowerCAmelCase = processor(sample['''audio''']['''array'''], return_tensors='''pt''').input_values
        with torch.no_grad():
            lowerCAmelCase = model(UpperCAmelCase_).logits.cpu().numpy()
        lowerCAmelCase = processor.decode(logits[0], output_word_offsets=UpperCAmelCase_)
        lowerCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        lowerCAmelCase = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        lowerCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(UpperCAmelCase_, '''word''')), UpperCAmelCase_)
        self.assertEqual(''' '''.join(self.get_from_offsets(UpperCAmelCase_, '''word''')), output.text)
        # output times
        lowerCAmelCase = torch.tensor(self.get_from_offsets(UpperCAmelCase_, '''start_time'''))
        lowerCAmelCase = torch.tensor(self.get_from_offsets(UpperCAmelCase_, '''end_time'''))
        # fmt: off
        lowerCAmelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        lowerCAmelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(UpperCAmelCase_, UpperCAmelCase_, atol=0.01))
        self.assertTrue(torch.allclose(UpperCAmelCase_, UpperCAmelCase_, atol=0.01))
33
import torch
from diffusers import StableDiffusionPipeline

# Minimal inference script for a fine-tuned (e.g. DreamBooth) Stable Diffusion
# checkpoint: load the pipeline on GPU, generate one image, save it to disk.
#
# NOTE(review): variable names were anonymized upstream — each assignment
# targets `UpperCAmelCase_`, but the reads show the intended names
# (`model_id`, `pipe`, `prompt`, `image`); restore them before running.
# `torch.floataa` is likewise a mangled `torch.float16` — confirm.
UpperCAmelCase_ = """path-to-your-trained-model"""
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")

UpperCAmelCase_ = """A photo of sks dog in a bucket"""
# 50 denoising steps, classifier-free guidance scale 7.5.
UpperCAmelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("""dog-bucket.png""")
33
1
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCamelCase(unittest.TestCase):
    """Integration check for the Flax mT5 model: the cross-entropy score of
    google/mt5-small on a fixed input/label pair must match a reference value.

    NOTE(review): identifiers were anonymized upstream — all assignments
    target ``lowerCAmelCase`` while the reads (``tokenizer``, ``model``,
    ``labels``, ``logits``, ``loss``, ``mtf_score``, ``EXPECTED_SCORE``)
    show the intended names; the code is not runnable as-is.
    """

    @slow
    def __snake_case(self):
        lowerCAmelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
        lowerCAmelCase = AutoTokenizer.from_pretrained('''google/mt5-small''')
        lowerCAmelCase = tokenizer('''Hello there''', return_tensors='''np''').input_ids
        lowerCAmelCase = tokenizer('''Hi I am''', return_tensors='''np''').input_ids
        # Teacher forcing: decoder inputs are the labels shifted right.
        lowerCAmelCase = shift_tokens_right(UpperCAmelCase_, model.config.pad_token_id, model.config.decoder_start_token_id)
        lowerCAmelCase = model(UpperCAmelCase_, decoder_input_ids=UpperCAmelCase_).logits
        # Mean token-level cross entropy against one-hot labels.
        lowerCAmelCase = optax.softmax_cross_entropy(UpperCAmelCase_, onehot(UpperCAmelCase_, logits.shape[-1])).mean()
        lowerCAmelCase = -(labels.shape[-1] * loss.item())
        # Reference score; presumably computed with the original T5/MTF
        # implementation — TODO confirm provenance.
        lowerCAmelCase = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import scaffold for the jukebox sub-package: maps module name to the
# list of symbols it exports.
# NOTE(review): assignment targets were anonymized (`UpperCAmelCase_`); the
# `_import_structure` read in the _LazyModule call below shows the intended
# name of this dict.
UpperCAmelCase_ = {
    """configuration_jukebox""": [
        """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """JukeboxConfig""",
        """JukeboxPriorConfig""",
        """JukeboxVQVAEConfig""",
    ],
    """tokenization_jukebox""": ["""JukeboxTokenizer"""],
}

# Modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ = [
        """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """JukeboxModel""",
        """JukeboxPreTrainedModel""",
        """JukeboxVQVAE""",
        """JukeboxPrior""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the _LazyModule below
    # resolves these attributes on first access.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module in sys.modules with a lazy proxy that imports
    # submodules on demand.
    UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCAmelCase_ =subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") UpperCAmelCase_ =( subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split() ) UpperCAmelCase_ ="""|""".join(sys.argv[1:]) UpperCAmelCase_ =re.compile(RF'''^({joined_dirs}).*?\.py$''') UpperCAmelCase_ =[x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
33
# NOTE(review): identifiers in this sample are machine-mangled — every local is
# assigned to the same name `lowerCAmelCase`, arguments appear as `UpperCAmelCase_`,
# and all methods are named `__snake_case`.  Reads such as `retriever`, `dataset`,
# `doc_dicts`, `vocab_tokens` reference names that are never bound, so this block
# cannot run as written.  Comments below describe the apparent intent; all code
# tokens are left untouched.

import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class __UpperCamelCase(__UpperCAmelCase):
    """Tests for `RagRetriever` over canonical-HF, custom-HF and legacy FAISS indexes.

    Presumably the base class `__UpperCAmelCase` is ``unittest.TestCase`` (the
    imports above pull in ``TestCase``) — TODO confirm against the un-mangled file.
    """

    # Apparent setUp: writes tiny DPR (WordPiece) and BART (BPE) tokenizer files
    # into a fresh temp dir used by the dummy retrievers below.
    def __snake_case(self):
        lowerCAmelCase = tempfile.mkdtemp()
        lowerCAmelCase = 8
        # DPR tok
        lowerCAmelCase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCAmelCase = os.path.join(self.tmpdirname, '''dpr_tokenizer''')
        os.makedirs(UpperCAmelCase_, exist_ok=UpperCAmelCase_)
        lowerCAmelCase = os.path.join(UpperCAmelCase_, DPR_VOCAB_FILES_NAMES['''vocab_file'''])
        # One vocab token per line, BERT/DPR tokenizer file format.
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        # BART tok
        # '\u0120' is the GPT-2/RoBERTa BPE space marker.
        lowerCAmelCase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowerCAmelCase = dict(zip(UpperCAmelCase_, range(len(UpperCAmelCase_))))
        lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        lowerCAmelCase = os.path.join(self.tmpdirname, '''bart_tokenizer''')
        os.makedirs(UpperCAmelCase_, exist_ok=UpperCAmelCase_)
        lowerCAmelCase = os.path.join(UpperCAmelCase_, BART_VOCAB_FILES_NAMES['''vocab_file'''])
        lowerCAmelCase = os.path.join(UpperCAmelCase_, BART_VOCAB_FILES_NAMES['''merges_file'''])
        # BART tokenizer files: JSON vocab + newline-separated merges.
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(UpperCAmelCase_) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(UpperCAmelCase_))

    # Loads the DPR question-encoder tokenizer written by setUp.
    def __snake_case(self):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))

    # Loads the DPR context-encoder tokenizer from the same files.
    def __snake_case(self):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))

    # Loads the BART tokenizer written by setUp.
    def __snake_case(self):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer'''))

    # Apparent tearDown: removes the temp dir.
    def __snake_case(self):
        shutil.rmtree(self.tmpdirname)

    # Builds a 2-row dummy dataset with a flat FAISS inner-product index over
    # the `embeddings` column (row "1" has the larger embedding norm).
    def __snake_case(self):
        lowerCAmelCase = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    # Builds a retriever over the canonical HF index, patching `load_dataset`
    # so no real download happens.
    def __snake_case(self):
        lowerCAmelCase = self.get_dummy_dataset()
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''') as mock_load_dataset:
            # Presumably `mock_load_dataset.return_value = dataset` in the
            # un-mangled original — the target name is lost here.
            lowerCAmelCase = dataset
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    # Builds a retriever over a custom HF index, either saved to disk and
    # re-loaded (`from_disk=True`) or wrapped in-memory via `CustomHFIndex`.
    def __snake_case(self, UpperCAmelCase_):
        lowerCAmelCase = self.get_dummy_dataset()
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name='''custom''',
        )
        if from_disk:
            lowerCAmelCase = os.path.join(self.tmpdirname, '''dataset''')
            lowerCAmelCase = os.path.join(self.tmpdirname, '''index.faiss''')
            dataset.get_index('''embeddings''').save(os.path.join(self.tmpdirname, '''index.faiss'''))
            # The index must be dropped before `save_to_disk` (faiss indexes
            # are not serialized with the dataset).
            dataset.drop_index('''embeddings''')
            dataset.save_to_disk(os.path.join(self.tmpdirname, '''dataset'''))
            del dataset
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, UpperCAmelCase_),
            )
        return retriever

    # Builds a retriever over the legacy (pickled) index format.  Embeddings
    # are one element longer than `retrieval_vector_size` — presumably to
    # account for the legacy format's extra dimension; TODO confirm.
    def __snake_case(self):
        lowerCAmelCase = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT)
        lowerCAmelCase = os.path.join(self.tmpdirname, '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''')
        dataset.save_faiss_index('''embeddings''', index_file_name + '''.index.dpr''')
        pickle.dump(dataset['''id'''], open(index_file_name + '''.index_meta.dpr''', '''wb'''))
        lowerCAmelCase = os.path.join(self.tmpdirname, '''psgs_w100.tsv.pkl''')
        # Legacy passages file: id -> [text, title] pickle.
        lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(UpperCAmelCase_, open(UpperCAmelCase_, '''wb'''))
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name='''legacy''',
            index_path=self.tmpdirname,
        )
        lowerCAmelCase = RagRetriever(
            UpperCAmelCase_, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    # Retrieval over the canonical index: two query vectors (+1s and -1s),
    # checks shapes, doc dict keys, and that inner product picks doc 1 for the
    # first query and doc 0 for the second.
    def __snake_case(self):
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Round-trips the canonical retriever through save_pretrained /
    # from_pretrained and checks it still retrieves.
    def __snake_case(self):
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''') as mock_load_dataset:
                lowerCAmelCase = self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase_)
                lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_)
                self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_)
                lowerCAmelCase = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
                )
                lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=1)
                self.assertTrue(out is not None)

    # Same retrieval assertions, custom HF index built in memory (from_disk=False).
    def __snake_case(self):
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Save/load round-trip for the in-memory custom index.
    def __snake_case(self):
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_)
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_)
            self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_)
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=1)
            self.assertTrue(out is not None)

    # Same retrieval assertions, custom HF index saved to and reloaded from disk.
    def __snake_case(self):
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Save/load round-trip for the on-disk custom index.
    def __snake_case(self):
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_)
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_)
            self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_)
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=1)
            self.assertTrue(out is not None)

    # Retrieval over the legacy index: doc dicts expose only text/title here.
    def __snake_case(self):
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=UpperCAmelCase_)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(UpperCAmelCase_), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''text''']), UpperCAmelCase_)
        self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Save/load round-trip for the legacy retriever.
    def __snake_case(self):
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_)
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_)
            self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_)
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_, n_docs=1)
            self.assertTrue(out is not None)

    # Calls the retriever end-to-end (__call__): numpy output by default,
    # torch tensors when return_tensors='pt'.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case(self):
        import torch

        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowerCAmelCase = retriever(UpperCAmelCase_, UpperCAmelCase_, prefix=retriever.config.generator.prefix, n_docs=UpperCAmelCase_)
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_)
        self.assertIsInstance(UpperCAmelCase_, UpperCAmelCase_)
        self.assertIsInstance(UpperCAmelCase_, np.ndarray)
        lowerCAmelCase = retriever(
            UpperCAmelCase_,
            UpperCAmelCase_,
            prefix=retriever.config.generator.prefix,
            n_docs=UpperCAmelCase_,
            return_tensors='''pt''',
        )
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(UpperCAmelCase_, torch.Tensor)
        self.assertIsInstance(UpperCAmelCase_, torch.Tensor)
        self.assertIsInstance(UpperCAmelCase_, torch.Tensor)

    # With a context-encoder tokenizer attached, the output gains two extra
    # tokenized-doc keys (6 attributes total).
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case(self):
        lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_)
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowerCAmelCase = retriever(UpperCAmelCase_, UpperCAmelCase_, prefix=retriever.config.generator.prefix, n_docs=UpperCAmelCase_)
        self.assertEqual(
            len(UpperCAmelCase_), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''')), UpperCAmelCase_
        )  # check for doc token related keys in dictionary.
33
1
"""Convert original Segment-Anything (SAM) checkpoints to the HF Transformers format.

Fix: in the mangled original every module-level binding targeted `UpperCAmelCase_`
and both functions were named `UpperCAmelCase`, while the code read
`KEYS_TO_MODIFY_MAPPING`, `replace_keys`, `convert_sam_checkpoint`,
`model_state_dict`, `parser`, `choices` and `args` — all unbound (NameError /
self-shadowing).  Restored the names implied by those call sites.
"""

import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


# Maps original SAM checkpoint key fragments to their HF-Transformers names.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    """Rename the keys of an original SAM ``state_dict`` to the HF naming scheme.

    Drops the `pixel_mean`/`pixel_std` buffers (handled by the image processor
    in HF) and remaps the hypernetwork MLP layers 0/1/2 to proj_in/layers.0/proj_out.
    Returns a new dict; the input dict is only mutated by the two pops.
    """
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            # group(2) is the inner MLP layer index.
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # The shared image embedding reuses the prompt encoder's positional embedding.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download an original SAM checkpoint, convert it and sanity-check the outputs.

    NOTE(review): `pytorch_dump_folder` and `push_to_hub` are accepted for CLI
    compatibility but are not used in this (truncated) sample — the save/push
    steps of the upstream script are absent here.  Requires a CUDA device.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    # Pick the vision config matching the checkpoint size (ViT-B is the default).
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    # No-prompt forward pass.
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # Regression values below are only known for the ViT-H checkpoint.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        # One point prompt.
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        # Box prompt.
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
33
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# checkpoint name -> config URL for the reference pretrained model
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class __UpperCamelCase(__UpperCAmelCase):
    """Configuration class for a Switch Transformers (sparse MoE T5-style) model.

    Fix: in the mangled original ``__init__`` declared the same parameter name
    28 times (a ``SyntaxError: duplicate argument``) and all three class
    attributes were named ``__a`` (the later bindings shadowed the earlier
    ones).  Parameter and attribute names were restored from the reads in the
    body (``vocab_size``, ``d_model``, ``router_dtype`` checks, etc.).
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # `feed_forward_proj` may be a plain activation ("relu") or a gated
        # variant ("gated-gelu"); split it into activation name + gated flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
33
1
# NOTE(review): identifiers in this sample are machine-mangled — every local is
# assigned to `lowerCAmelCase`, all parameters of a signature share the single
# name `UpperCAmelCase_` / `_snake_case` (a *SyntaxError*: duplicate argument),
# and all methods are named `__snake_case`.  The block cannot compile as
# written; comments below describe the apparent intent, code tokens untouched.

from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class __UpperCamelCase:
    """Model-tester helper that builds tiny Blenderbot configs/inputs for the TF tests."""

    # config class / extra config kwargs / activation used by the tiny model
    __a: Dict = BlenderbotConfig
    __a: Dict = {}
    __a: Dict = """gelu"""

    # Apparent constructor storing the tiny-model hyperparameters.
    # NOTE(review): every parameter is named `UpperCAmelCase_` -> SyntaxError.
    def __init__(
        self,
        UpperCAmelCase_,
        UpperCAmelCase_=13,
        UpperCAmelCase_=7,
        UpperCAmelCase_=True,
        UpperCAmelCase_=False,
        UpperCAmelCase_=99,
        UpperCAmelCase_=32,
        UpperCAmelCase_=2,
        UpperCAmelCase_=4,
        UpperCAmelCase_=37,
        UpperCAmelCase_=0.1,
        UpperCAmelCase_=0.1,
        UpperCAmelCase_=20,
        UpperCAmelCase_=2,
        UpperCAmelCase_=1,
        UpperCAmelCase_=0,
    ):
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = eos_token_id
        lowerCAmelCase = pad_token_id
        lowerCAmelCase = bos_token_id

    # Builds a tiny config plus a matching inputs dict; every sequence is
    # forced to end with the EOS token.
    def __snake_case(self):
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        lowerCAmelCase = tf.concat([input_ids, eos_tensor], axis=1)
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lowerCAmelCase = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        lowerCAmelCase = prepare_blenderbot_inputs_dict(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_)
        return config, inputs_dict

    # Checks that decoding with cached past_key_values matches a full forward
    # pass on a random output slice.
    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_):
        lowerCAmelCase = TFBlenderbotModel(config=UpperCAmelCase_).get_decoder()
        lowerCAmelCase = inputs_dict['''input_ids''']

        lowerCAmelCase = input_ids[:1, :]
        lowerCAmelCase = inputs_dict['''attention_mask'''][:1, :]
        lowerCAmelCase = inputs_dict['''head_mask''']
        lowerCAmelCase = 1

        # first forward pass
        lowerCAmelCase = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_, head_mask=UpperCAmelCase_, use_cache=UpperCAmelCase_)

        lowerCAmelCase, lowerCAmelCase = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        lowerCAmelCase = ids_tensor((self.batch_size, 3), config.vocab_size)
        lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)

        # append to next input_ids and
        lowerCAmelCase = tf.concat([input_ids, next_tokens], axis=-1)
        lowerCAmelCase = tf.concat([attention_mask, next_attn_mask], axis=-1)

        lowerCAmelCase = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_)[0]
        lowerCAmelCase = model(UpperCAmelCase_, attention_mask=UpperCAmelCase_, past_key_values=UpperCAmelCase_)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        lowerCAmelCase = int(ids_tensor((1,), output_from_past.shape[-1]))
        lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
        lowerCAmelCase = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCAmelCase_, UpperCAmelCase_, rtol=1E-3)


# Fills in default attention/head masks for a Blenderbot input dict.
# NOTE(review): all parameters are named `_snake_case` -> SyntaxError; the body
# reads `config`, `input_ids`, `decoder_input_ids`, etc., so those were the
# original parameter names.
def UpperCAmelCase(
    _snake_case,
    _snake_case,
    _snake_case,
    _snake_case=None,
    _snake_case=None,
    _snake_case=None,
    _snake_case=None,
    _snake_case=None,
):
    if attention_mask is None:
        # mask out pad tokens
        lowerCAmelCase = tf.cast(tf.math.not_equal(_snake_case, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # first decoder position is always attended (decoder start token)
        lowerCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class __UpperCamelCase(__UpperCAmelCase, __UpperCAmelCase, unittest.TestCase):
    """Common TF model tests for Blenderbot.

    Presumably the two mangled bases are ``TFModelTesterMixin`` and
    ``PipelineTesterMixin`` (both imported above) — TODO confirm.
    """

    # all model classes / generative classes / pipeline mapping under test
    __a: Union[str, Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __a: Union[str, Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __a: int = (
        {
            '''conversational''': TFBlenderbotForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotModel,
            '''summarization''': TFBlenderbotForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotForConditionalGeneration,
            '''translation''': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # test-suite feature flags
    __a: List[Any] = True
    __a: Optional[int] = False
    __a: Union[str, Any] = False

    # Apparent setUp: instantiates the model tester and the config tester.
    # NOTE(review): `TFBlenderbotModelTester` is never defined in this mangled
    # sample — presumably the helper class above.
    def __snake_case(self):
        lowerCAmelCase = TFBlenderbotModelTester(self)
        lowerCAmelCase = ConfigTester(self, config_class=UpperCAmelCase_)

    # Runs the shared config sanity checks.
    def __snake_case(self):
        self.config_tester.run_common_tests()

    # Runs the cached-decoding equivalence check defined on the tester.
    def __snake_case(self):
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_)


@require_tokenizers
@require_tf
class __UpperCamelCase(unittest.TestCase):
    """Slow integration test: generate a reply with facebook/blenderbot-400M-distill."""

    __a: Dict = ["""My friends are cool but they eat too many carbs."""]
    __a: Union[str, Any] = """facebook/blenderbot-400M-distill"""

    # Tokenizer for the reference checkpoint (cached).
    @cached_property
    def __snake_case(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    # Model for the reference checkpoint (cached).
    @cached_property
    def __snake_case(self):
        lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    # Generates from the fixed source text and checks the exact expected reply.
    @slow
    def __snake_case(self):
        lowerCAmelCase = self.tokenizer(self.src_text, return_tensors='''tf''')
        lowerCAmelCase = self.model.generate(
            model_inputs.input_ids,
        )
        lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=UpperCAmelCase_)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
33
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first-order (single-character) entropy of *text*, the
    second-order (character-pair) entropy, and the difference between the
    two, each rounded to an integer and printed with one decimal place.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # total count of single characters (denominator for probabilities)
    all_sum = sum(single_char_strings.values())

    # one-length-string entropy accumulator
    my_fir_sum = 0
    # for each alpha, if it occurs in the text, add its entropy contribution
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # Shannon entropy term

    # print first-order entropy (negated: log2(p) <= 0 for p <= 1)
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two-length-string entropy
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # iterate over all ordered character pairs (two distinct loop variables!)
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second-order entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """Count single characters and adjacent character pairs in *text*.

    The last character is counted once up front (the main loop stops one
    short of the end), and a leading space is paired with the first
    character so every character participates in exactly one pair start.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have a space at the start
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests; sample usage of calculate_prob is kept below."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
33
1
from __future__ import annotations def UpperCAmelCase ( _snake_case , _snake_case ): if len(_snake_case ) < k or k < 0: raise ValueError('''Invalid Input''' ) lowerCAmelCase = lowerCAmelCase = sum(array[:k] ) for i in range(len(_snake_case ) - k ): lowerCAmelCase = current_sum - array[i] + array[i + k] lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() UpperCAmelCase_ =[randint(-1000, 1000) for i in range(100)] UpperCAmelCase_ =randint(0, 110) print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
33
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (CPU-sized) checks for the IF inpainting super-resolution pipeline."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    # width/height are fixed by the low-res input, so drop them from the params
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    # the super-resolution variant additionally batches the original image
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        """Reuse the shared tiny super-resolution components from IFPipelineTesterMixin."""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy inputs (16x16 low-res image, 32x32 original/mask)."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
33
1
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCAmelCase_ ={"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Submodule name -> public names map consumed by _LazyModule below.
# NOTE: must be named `_import_structure` (the previous auto-generated name
# left `_import_structure` undefined and raised NameError on import).
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Register optional submodules only when their backends are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    # Defer heavy imports until a public name is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
from __future__ import annotations

from typing import Any


class __UpperCamelCase:
    """Undirected weighted graph supporting Boruvka's minimum-spanning-tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        # node count, edge list of [u, v, weight], and node -> component-parent map
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Record an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Follow parent links until the representative of u_node's component is found."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """If u_node is not its own representative, re-point every node at its
        component representative (full path compression pass)."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of u_node and v_node, attaching smaller to larger.

        NOTE: the mangled original had lost the `self.m_component[...] = ...`
        assignments; they are restored here so the merge actually happens.
        """
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Print the edges and total weight of a minimum spanning tree (Boruvka)."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # every node starts in its own component of size 1
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # find the cheapest edge leaving every component
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # add each selected edge that still joins two distinct components
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            # reset candidate edges for the next round
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")


# Public alias: the mangled class name above is underscore-prefixed and thus
# excluded from `from module import *`.
GraphUndirectedWeighted = __UpperCamelCase


def UpperCAmelCase():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON (lines or whole-document) datasets."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that parses JSON / JSON-lines files with pyarrow."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config knobs and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            # keep old configs working by forwarding the deprecated value
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast an Arrow table to the requested features, adding missing columns."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
33
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """Image processor with ConvNeXt-style resize/crop (crop_pct below 384px),
    rescale and normalize steps."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; below 384px the shortest edge is upscaled by
        1/crop_pct and then center-cropped, otherwise the image is warped
        directly to (shortest_edge, shortest_edge)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply resize/rescale/normalize to one or more images; per-call
        arguments override the processor's stored defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # NOTE(review): `and` binds tighter than `or` here, so this reads as
        # `(do_resize and size is None) or resample is None` — kept as in the
        # original upstream implementation.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
33
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Swin transformer used as a MaskFormer backbone.

    Stores the architecture hyper-parameters (patching, depths, attention
    heads, dropout rates, ...) and derives the backbone stage names and
    output features for the backbone API.
    """

    model_type = "maskformer-swin"

    # Map standard config attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
33
1
# NOTE(review): machine-mangled test module for diffusers'
# StableDiffusionPanoramaPipeline.  Each physical line below packs many
# statements (statements are even split mid-expression across lines), so the
# code is kept byte-identical and only these header comments are added.
# Contents: a fast @skip_mps test class exercising dummy components, DDIM /
# EulerAncestralDiscrete / PNDM schedulers, negative prompts and
# view_batch_size against hard-coded image slices; and a @slow
# @require_torch_gpu integration class covering real checkpoints, an
# intermediate-state callback, and sequential CPU offload memory bounds.
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =StableDiffusionPanoramaPipeline __a : Union[str, Any] =TEXT_TO_IMAGE_PARAMS __a : Union[str, Any] =TEXT_TO_IMAGE_BATCH_PARAMS __a : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS __a : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS def __snake_case ( self ): torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCAmelCase = DDIMScheduler() torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(UpperCAmelCase_ ) lowerCAmelCase = 
CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) lowerCAmelCase = sd_pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __snake_case ( self ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def __snake_case ( self ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 ) def __snake_case ( self ): lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) 
lowerCAmelCase = '''french fries''' lowerCAmelCase = sd_pipe(**UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __snake_case ( self ): lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) lowerCAmelCase = sd_pipe(**UpperCAmelCase_ , view_batch_size=2 ) lowerCAmelCase = output.images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __snake_case ( self ): lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) lowerCAmelCase = sd_pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __snake_case ( self ): lowerCAmelCase = '''cpu''' # 
ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = PNDMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCAmelCase_ ) lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ ) lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) lowerCAmelCase = sd_pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self , UpperCAmelCase_=0 ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = '''stabilityai/stable-diffusion-2-base''' lowerCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='''scheduler''' ) lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() lowerCAmelCase = self.get_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 20_48, 3) lowerCAmelCase = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ] 
) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def __snake_case ( self ): lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCAmelCase_ ) lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() lowerCAmelCase = self.get_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 20_48, 3) lowerCAmelCase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __snake_case ( self ): lowerCAmelCase = 0 def callback_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> None: lowerCAmelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 2_56) lowerCAmelCase = latents[0, -3:, -3:, -1] lowerCAmelCase = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowerCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 2_56) lowerCAmelCase = latents[0, -3:, -3:, -1] lowerCAmelCase = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowerCAmelCase = False lowerCAmelCase = '''stabilityai/stable-diffusion-2-base''' lowerCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='''scheduler''' ) lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ ) lowerCAmelCase = 
pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing() lowerCAmelCase = self.get_inputs() pipe(**UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __snake_case ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase = '''stabilityai/stable-diffusion-2-base''' lowerCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='''scheduler''' ) lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ ) lowerCAmelCase = pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase = self.get_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ) lowerCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
33
from collections.abc import Sequence


def UpperCAmelCase(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over contiguous subarrays of ``arr`` (Kadane).

    The mangled original had duplicate parameter names (a SyntaxError) and a
    final ``max()`` over the wrong operands; the canonical Kadane form is
    restored here.

    Args:
        arr: sequence of numbers; may be empty.
        allow_empty_subarrays: when True the empty subarray (sum 0) is a valid
            candidate, so the result is never negative.

    Returns:
        The maximum subarray sum, or 0 for an empty input.
    """
    if not arr:
        return 0
    # Best sum seen so far; -inf so a single negative element can win when
    # empty subarrays are disallowed.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    # Best sum of a subarray ending at the current element.
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


# Backward-compatible alias: the demo below (and the original doctest-style
# call site) refers to the function by this name.
max_subarray_sum = UpperCAmelCase

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(UpperCAmelCase_) = }")
33
1
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid of the raw logits."""
    # The mangled original named the parameter `_snake_case` but used
    # `_outputs` in the body (NameError); fixed here.
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis of the logits."""
    # Subtract the per-row max before exponentiating to avoid overflow.
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    """Post-processing functions the pipeline can apply to model logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `\"default\"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `\"sigmoid\"`: Applies the sigmoid function on the output.
            - `\"softmax\"`: Applies the softmax function on the output.
            - `\"none\"`: Does not apply any function on the output.
    """,
)
class __UpperCamelCase(Pipeline):
    """Text classification pipeline using a sequence-classification model.

    NOTE(review): in the mangled original all four hook methods were named
    ``__snake_case`` (each shadowing the previous) and the two helper
    functions above were both named ``UpperCAmelCase`` while the body called
    ``sigmoid``/``softmax``.  The canonical ``Pipeline`` hook names
    (``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess``)
    and helper names are restored so the base class can dispatch to them.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Restrict this pipeline to sequence-classification heads.
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the given text(s); see the class docstring for parameters."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            # `idalabel` in the mangled source is the digit-mangled `id2label`.
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            # Mangled lambda bound `UpperCAmelCase_` but read `x`; fixed.
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
33
# NOTE(review): machine-mangled test module for transformers'
# BertJapaneseTokenizer.  Each physical line below packs many statements and
# statements are split mid-expression across lines, so the code is kept
# byte-identical and only these header comments are added.  Contents: a
# @custom_tokenizers word-level test class (MeCab with ipadic/unidic dicts,
# Sudachi split modes A/B/C, Jumanpp, whitespace/lower-case/normalization
# options, wordpiece sub-tokenization, pickling round-trips, special-token
# building); a character-level tokenizer test class; an AutoTokenizer
# registration test; and a test of the cross-class loading warning message.
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) 
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', 
'''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) 
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def 
__snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = 
JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] 
) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = 
self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) 
class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
1
# Text-generation pipeline (obfuscated transformers source).
# NOTE(review): identifiers in this file were machine-mangled
# (`__UpperCamelCase`, `lowerCAmelCase`, `UpperCAmelCase_`).  Several spots
# are no longer valid Python as written: the enum's three members share one
# name, several `def`s repeat the parameter name `UpperCAmelCase_` (a
# SyntaxError), and `__UpperCAmelCase` is never defined in this chunk.
# Comments below describe the intent of the un-mangled original; all code
# tokens are left untouched.
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


# TensorFlow is an optional dependency; import it only when available.
if is_tf_available():
    import tensorflow as tf


class __UpperCamelCase(enum.Enum):
    """Output-format selector for the pipeline (presumably `ReturnType`
    with members TENSORS / NEW_TEXT / FULL_TEXT — TODO confirm against the
    un-mangled original)."""

    # NOTE(review): all three members carry the same mangled name `__a`,
    # so the later assignments shadow the earlier ones as written.
    __a: int = 0
    __a: int = 1
    __a: int = 2


@add_end_docstrings(__UpperCAmelCase)  # NOTE(review): mangled; presumably PIPELINE_INIT_ARGS — TODO confirm
class __UpperCamelCase(__UpperCAmelCase):  # NOTE(review): base name mangled; presumably Pipeline
    """Causal language-model text-generation pipeline: tokenize a prompt,
    call `model.generate`, and decode the generated sequences."""

    # Article prepended to prompts for XLNet / Transfo-XL so the model has
    # context to condition on.  Runtime string: kept byte-for-byte.
    __a: str = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """

    def __init__(self, *UpperCAmelCase_, **UpperCAmelCase_):
        # NOTE(review): duplicate parameter name is a mangling artifact
        # (originally *args, **kwargs) and is a SyntaxError as written.
        super().__init__(*UpperCAmelCase_, **UpperCAmelCase_)
        # Restrict to causal-LM model classes for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            # NOTE(review): the repeated `lowerCAmelCase =` below were
            # distinct variables (prefix / preprocess_params / forward_params)
            # in the original — the mangling collapsed them.
            lowerCAmelCase = None
            if self.model.config.prefix is not None:
                lowerCAmelCase = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                lowerCAmelCase = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = self._sanitize_parameters(
                    prefix=UpperCAmelCase_, **self._forward_params
                )
                lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
                lowerCAmelCase = {**self._forward_params, **forward_params}

    def __snake_case(
        self,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        **UpperCAmelCase_,
    ):
        # _sanitize_parameters: split user kwargs into preprocess / forward /
        # postprocess parameter dicts.  NOTE(review): parameters were
        # originally (return_full_text, return_tensors, return_text,
        # return_type, clean_up_tokenization_spaces, prefix,
        # handle_long_generation, stop_sequence, **generate_kwargs) —
        # TODO confirm; the duplicate names are a SyntaxError as written.
        lowerCAmelCase = {}
        if prefix is not None:
            lowerCAmelCase = prefix
        if prefix:
            # Tokenize the prefix once so generation lengths can account for it.
            lowerCAmelCase = self.tokenizer(
                UpperCAmelCase_, padding=UpperCAmelCase_, add_special_tokens=UpperCAmelCase_, return_tensors=self.framework
            )
            lowerCAmelCase = prefix_inputs['''input_ids'''].shape[-1]
        if handle_long_generation is not None:
            # Only the "hole" strategy is supported for overlong prompts.
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    ''' [None, \'hole\']'''
                )
            lowerCAmelCase = handle_long_generation
        preprocess_params.update(UpperCAmelCase_)
        lowerCAmelCase = generate_kwargs
        lowerCAmelCase = {}
        # The three return_* flags are mutually exclusive ways to pick ReturnType.
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''')
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''')
            lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''')
            lowerCAmelCase = ReturnType.TENSORS
        if return_type is not None:
            lowerCAmelCase = return_type
        if clean_up_tokenization_spaces is not None:
            lowerCAmelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only single-token stop sequences are honoured for now.
            lowerCAmelCase = self.tokenizer.encode(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_)
            if len(UpperCAmelCase_) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.'''
                )
            lowerCAmelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def __snake_case(self, *UpperCAmelCase_, **UpperCAmelCase_):
        # _parse_and_tokenize override: Transfo-XL needs an extra tokenizer flag.
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True})
        return super()._parse_and_tokenize(*UpperCAmelCase_, **UpperCAmelCase_)

    def __call__(self, UpperCAmelCase_, **UpperCAmelCase_):
        # Public entry point; delegates to Pipeline.__call__.
        return super().__call__(UpperCAmelCase_, **UpperCAmelCase_)

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_="", UpperCAmelCase_=None, **UpperCAmelCase_):
        # preprocess: tokenize prefix + prompt and, with the "hole" strategy,
        # trim the prompt so prompt + new tokens fit the model's max length.
        lowerCAmelCase = self.tokenizer(
            prefix + prompt_text, padding=UpperCAmelCase_, add_special_tokens=UpperCAmelCase_, return_tensors=self.framework
        )
        lowerCAmelCase = prompt_text
        if handle_long_generation == "hole":
            lowerCAmelCase = inputs['''input_ids'''].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                lowerCAmelCase = generate_kwargs['''max_new_tokens''']
            else:
                lowerCAmelCase = generate_kwargs.get('''max_length''', self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError('''We cannot infer how many new tokens are expected''')
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
                        ''' models max length'''
                    )
                # Keep only the rightmost tokens that still leave room to generate.
                lowerCAmelCase = inputs['''input_ids'''][:, -keep_length:]
                if "attention_mask" in inputs:
                    lowerCAmelCase = inputs['''attention_mask'''][:, -keep_length:]
        return inputs

    def __snake_case(self, UpperCAmelCase_, **UpperCAmelCase_):
        # _forward: run model.generate and reshape output to (batch, num_return, seq).
        lowerCAmelCase = model_inputs['''input_ids''']
        lowerCAmelCase = model_inputs.get('''attention_mask''', UpperCAmelCase_)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            lowerCAmelCase = None
            lowerCAmelCase = None
            lowerCAmelCase = 1
        else:
            lowerCAmelCase = input_ids.shape[0]
        lowerCAmelCase = model_inputs.pop('''prompt_text''')

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        lowerCAmelCase = generate_kwargs.pop('''prefix_length''', 0)
        if prefix_length > 0:
            lowerCAmelCase = '''max_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                lowerCAmelCase = generate_kwargs.get('''max_length''') or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            lowerCAmelCase = '''min_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        lowerCAmelCase = self.model.generate(input_ids=UpperCAmelCase_, attention_mask=UpperCAmelCase_, **UpperCAmelCase_)
        lowerCAmelCase = generated_sequence.shape[0]
        if self.framework == "pt":
            lowerCAmelCase = generated_sequence.reshape(UpperCAmelCase_, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            lowerCAmelCase = tf.reshape(UpperCAmelCase_, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_=ReturnType.FULL_TEXT, UpperCAmelCase_=True):
        # postprocess: decode each generated sequence, strip the prompt for
        # NEW_TEXT, and package one record dict per sequence.
        lowerCAmelCase = model_outputs['''generated_sequence'''][0]
        lowerCAmelCase = model_outputs['''input_ids''']
        lowerCAmelCase = model_outputs['''prompt_text''']
        lowerCAmelCase = generated_sequence.numpy().tolist()
        lowerCAmelCase = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                lowerCAmelCase = {'''generated_token_ids''': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                lowerCAmelCase = self.tokenizer.decode(
                    UpperCAmelCase_,
                    skip_special_tokens=UpperCAmelCase_,
                    clean_up_tokenization_spaces=UpperCAmelCase_,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    lowerCAmelCase = 0
                else:
                    lowerCAmelCase = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=UpperCAmelCase_,
                            clean_up_tokenization_spaces=UpperCAmelCase_,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    lowerCAmelCase = prompt_text + text[prompt_length:]
                else:
                    lowerCAmelCase = text[prompt_length:]
                lowerCAmelCase = {'''generated_text''': all_text}
            records.append(UpperCAmelCase_)
        return records
33
# Tests for the hub file-caching helpers in `transformers.utils`
# (cached_file / get_file_from_repo / has_file).
# NOTE(review): identifiers were machine-mangled — all test methods share the
# name `__snake_case` (later defs shadow earlier ones as written), and the
# three module constants share the name `UpperCAmelCase_` (originally
# distinct: repo id, cache folder, commit hash — TODO confirm).
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


# Fixture repo, its expected local cache directory, and a pinned commit hash.
UpperCAmelCase_ = """hf-internal-testing/tiny-random-bert"""
UpperCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCAmelCase_ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""


class __UpperCamelCase(unittest.TestCase):
    """Exercises download, caching layout, error paths, and offline behaviour
    of the hub file helpers (requires network in the un-mocked cases)."""

    def __snake_case(self):
        # Downloading creates the blobs/refs/snapshots cache layout and the
        # second call returns the already-cached path.
        lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(UpperCAmelCase_))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_, UpperCAmelCase_)))
        with open(os.path.join(UpperCAmelCase_, '''refs''', '''main''')) as f:
            lowerCAmelCase = f.read()
        self.assertEqual(UpperCAmelCase_, os.path.join(UpperCAmelCase_, '''snapshots''', UpperCAmelCase_, UpperCAmelCase_))
        self.assertTrue(os.path.isfile(UpperCAmelCase_))
        # File is cached at the same place the second time.
        lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_)
        self.assertEqual(UpperCAmelCase_, UpperCAmelCase_)
        # Using a specific revision to test the full commit hash.
        lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_, revision='''9b8c223''')
        self.assertEqual(UpperCAmelCase_, os.path.join(UpperCAmelCase_, '''snapshots''', UpperCAmelCase_, UpperCAmelCase_))

    def __snake_case(self):
        # Invalid repo id, invalid revision, and missing filename each raise
        # with a specific message.
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid model identifier'''):
            lowerCAmelCase = cached_file('''tiny-random-bert''', UpperCAmelCase_)
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid git identifier'''):
            lowerCAmelCase = cached_file(UpperCAmelCase_, UpperCAmelCase_, revision='''aaaa''')
        with self.assertRaisesRegex(UpperCAmelCase_, '''does not appear to have a file named'''):
            lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''')

    def __snake_case(self):
        # Missing entries: raise by default, return None when the
        # _raise_exceptions_* flags are disabled; a 500 from the hub (mocked
        # below) is treated as a connection error.
        with self.assertRaisesRegex(UpperCAmelCase_, '''does not appear to have a file named'''):
            lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''')
        with open(os.path.join(UpperCAmelCase_, '''refs''', '''main''')) as f:
            lowerCAmelCase = f.read()
        self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_, '''.no_exist''', UpperCAmelCase_, '''conf''')))
        lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''', _raise_exceptions_for_missing_entries=UpperCAmelCase_)
        self.assertIsNone(UpperCAmelCase_)
        lowerCAmelCase = cached_file(
            UpperCAmelCase_, '''conf''', local_files_only=UpperCAmelCase_, _raise_exceptions_for_missing_entries=UpperCAmelCase_
        )
        self.assertIsNone(UpperCAmelCase_)
        # NOTE(review): the five assignments below look like mangled mock
        # attribute setup (status_code=500, headers={}, raise_for_status=
        # HTTPError, json={} on one Mock object) — TODO confirm.
        lowerCAmelCase = mock.Mock()
        lowerCAmelCase = 5_00
        lowerCAmelCase = {}
        lowerCAmelCase = HTTPError
        lowerCAmelCase = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''', return_value=UpperCAmelCase_) as mock_head:
            lowerCAmelCase = cached_file(UpperCAmelCase_, '''conf''', _raise_exceptions_for_connection_errors=UpperCAmelCase_)
            self.assertIsNone(UpperCAmelCase_)
            # This check we did call the fake head request
            mock_head.assert_called()

    def __snake_case(self):
        # has_file: the PT-only fixture repo has torch weights but neither TF
        # nor Flax weights (arguments were WEIGHTS_NAME / TF2_WEIGHTS_NAME /
        # FLAX_WEIGHTS_NAME before mangling — TODO confirm).
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''', UpperCAmelCase_))
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', UpperCAmelCase_))
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', UpperCAmelCase_))

    def __snake_case(self):
        # get_file_from_repo: None for a missing file, errors for bad repo or
        # revision, and a loadable config file on success.
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''', '''ahah.txt'''))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid model identifier'''):
            get_file_from_repo('''bert-base-case''', UpperCAmelCase_)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(UpperCAmelCase_, '''is not a valid git identifier'''):
            get_file_from_repo('''bert-base-cased''', UpperCAmelCase_, revision='''ahaha''')
        lowerCAmelCase = get_file_from_repo('''bert-base-cased''', UpperCAmelCase_)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        lowerCAmelCase = json.loads(open(UpperCAmelCase_, '''r''').read())
        self.assertEqual(config['''hidden_size'''], 7_68)

    def __snake_case(self):
        # get_file_from_repo also resolves plain local directories.
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCAmelCase = Path(UpperCAmelCase_) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(UpperCAmelCase_, '''a.txt'''), str(UpperCAmelCase_))
            self.assertIsNone(get_file_from_repo(UpperCAmelCase_, '''b.txt'''))
33
1
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=30 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=32 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=10 , UpperCAmelCase_=0.02 , UpperCAmelCase_=None , UpperCAmelCase_=2 , ): lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = is_training lowerCAmelCase = use_labels lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = scope lowerCAmelCase = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase = (image_size // patch_size) ** 2 lowerCAmelCase = num_patches + 1 
def __snake_case ( self ): lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = self.get_config() return config, pixel_values, labels def __snake_case ( self ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = ViTModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = ViTForMaskedImageModeling(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model(UpperCAmelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase = 1 lowerCAmelCase = ViTForMaskedImageModeling(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase = model(UpperCAmelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __snake_case ( self , UpperCAmelCase_ , 
UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = self.type_sequence_label_size lowerCAmelCase = ViTForImageClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase = 1 lowerCAmelCase = ViTForImageClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __snake_case ( self ): lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : List[str] =( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) __a : int =( {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification} if is_torch_available() else {} ) __a : List[Any] =True __a : List[Any] =False __a : List[Any] =False __a : Any =False def __snake_case ( self ): lowerCAmelCase = ViTModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 ) def __snake_case ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __snake_case ( self ): pass def __snake_case ( self ): lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(UpperCAmelCase_ ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) ) def __snake_case ( self ): lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(UpperCAmelCase_ ) lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase = [*signature.parameters.keys()] lowerCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) @slow def __snake_case ( self ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = ViTModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def UpperCAmelCase ( ): lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def __snake_case ( self ): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def __snake_case ( self ): lowerCAmelCase = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(UpperCAmelCase_ ) lowerCAmelCase = self.default_image_processor lowerCAmelCase = prepare_img() lowerCAmelCase = 
image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**UpperCAmelCase_ ) # verify the logits lowerCAmelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) @slow def __snake_case ( self ): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. lowerCAmelCase = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(UpperCAmelCase_ ) lowerCAmelCase = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 ) lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ) lowerCAmelCase = inputs.pixel_values.to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ ) # verify the logits lowerCAmelCase = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __snake_case ( self ): lowerCAmelCase = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' ) lowerCAmelCase = self.default_image_processor lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ) 
lowerCAmelCase = inputs.pixel_values.to(UpperCAmelCase_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCAmelCase = model(UpperCAmelCase_ )
33
# Spark-backed dataset reader for the `datasets` library.
# NOTE(review): identifiers were machine-mangled; `__UpperCAmelCase` (the base
# class, presumably AbstractDatasetReader) is undefined in this chunk, and the
# duplicated `UpperCAmelCase_` parameter names make `__init__` a SyntaxError
# as written.  Tokens are left untouched; comments describe intent.
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class __UpperCamelCase(__UpperCAmelCase):
    """Reader that materializes a `datasets` Dataset from a Spark DataFrame
    via the packaged `Spark` builder."""

    def __init__(
        self,
        UpperCAmelCase_,
        UpperCAmelCase_=None,
        UpperCAmelCase_=None,
        UpperCAmelCase_=True,
        UpperCAmelCase_=None,
        UpperCAmelCase_=False,
        UpperCAmelCase_=None,
        UpperCAmelCase_=True,
        UpperCAmelCase_="arrow",
        **UpperCAmelCase_,
    ):
        # NOTE(review): parameters were originally (df, split, features,
        # streaming, cache_dir, keep_in_memory, working_dir,
        # load_from_cache_file, file_format, **kwargs) — TODO confirm.
        super().__init__(
            split=UpperCAmelCase_,
            features=UpperCAmelCase_,
            cache_dir=UpperCAmelCase_,
            keep_in_memory=UpperCAmelCase_,
            streaming=UpperCAmelCase_,
            **UpperCAmelCase_,
        )
        # NOTE(review): the assignments below look like mangled
        # `self._load_from_cache_file` / `self._file_format` / `self.builder`
        # initialisations (those attributes are read in the method below).
        lowerCAmelCase = load_from_cache_file
        lowerCAmelCase = file_format
        lowerCAmelCase = Spark(
            df=UpperCAmelCase_,
            features=UpperCAmelCase_,
            cache_dir=UpperCAmelCase_,
            working_dir=UpperCAmelCase_,
            **UpperCAmelCase_,
        )

    def __snake_case(self):
        # read(): streaming mode returns a lazy dataset; otherwise the builder
        # downloads/prepares (honouring the cache flag) and loads from disk.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a rebuild when the cache must not be reused.
        lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=UpperCAmelCase_,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
33
1
def UpperCAmelCase(_snake_case=100):
    """Project Euler problem 6: return the difference between the square of
    the sum and the sum of the squares of the first *_snake_case* natural
    numbers.

    Uses the closed forms  (Σi)² = (n(n+1)/2)²  and  Σi² = n(n+1)(2n+1)/6.

    >>> UpperCAmelCase(10)
    2640
    """
    # BUG FIX: the body previously referenced undefined names `n`,
    # `sum_cubes` and `sum_squares` (mangling artifact), so any call raised
    # NameError.  All computation now uses the actual parameter.
    square_of_sum = (_snake_case * (_snake_case + 1) // 2) ** 2
    sum_of_squares = _snake_case * (_snake_case + 1) * (2 * _snake_case + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # BUG FIX: the guard previously called undefined `solution()`.
    print(F'''{UpperCAmelCase() = }''')
33
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def UpperCAmelCase(_snake_case=3):
    """Build an n-qubit quantum Fourier transform circuit, simulate it with
    10000 shots on the QASM simulator, and return the measurement counts.

    Args:
        _snake_case: number of qubits (positive exact integer, at most 10).

    Raises:
        TypeError: if the qubit count is a string.
        ValueError: if it is non-positive, non-integral, or greater than 10.
    """
    # BUG FIX: the original validation called `isinstance(x, x)` (which is
    # itself a TypeError) and referenced the undefined name
    # `number_of_qubits`; the gate calls and `execute` had lost their
    # distinct arguments to mangling.
    number_of_qubits = _snake_case
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be a integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    quantum_register = QuantumRegister(number_of_qubits, '''qr''')
    classical_register = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(quantum_register, classical_register)

    # Standard QFT: a Hadamard on each qubit followed by controlled phase
    # rotations of decreasing angle pi / 2^(distance).
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order, as the QFT outputs qubits in bit-reversed order.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(quantum_register, classical_register)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    # BUG FIX: previously called the undefined name `quantum_fourier_transform`.
    print(
        F'''Total count for quantum fourier transform state is: \
    {UpperCAmelCase(3)}'''
    )
33
1
import numpy as np


def _sigmoid(vector):
    """Element-wise logistic sigmoid: 1 / (1 + e^(-x))."""
    return 1 / (1 + np.exp(-vector))


def UpperCAmelCase(_snake_case):
    """Element-wise logistic sigmoid of *_snake_case* (scalar or ndarray)."""
    return _sigmoid(_snake_case)


def UpperCAmelCase(_snake_case):
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x), element-wise.

    BUG FIX: this function previously called the undefined name `sigmoid`
    (mangling gave both functions in this file the same name), so every call
    raised NameError.  Both now delegate to the private `_sigmoid` helper.
    NOTE: this definition shadows the sigmoid above at module level, exactly
    as in the original file.
    """
    return _snake_case * _sigmoid(_snake_case)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
# NOTE(review): identifiers were machine-mangled; the base-class names and
# several distinct variables were collapsed (every `lowerCAmelCase =` below
# was a distinct assignment — local or `self.<attr>` — in the original), and
# the duplicated `UpperCAmelCase_` parameter names make the `def`s
# SyntaxErrors as written.  Tokens are left untouched.
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class __UpperCamelCase(__UpperCAmelCase, __UpperCAmelCase):
    """Variance-preserving (VP) SDE scheduler: one Euler–Maruyama step of the
    reverse-time SDE given a model score (presumably ScoreSdeVpScheduler —
    TODO confirm against the un-mangled original)."""

    # Solver order of the scheduler.
    __a: int = 1

    @register_to_config
    def __init__(self, UpperCAmelCase_=20_00, UpperCAmelCase_=0.1, UpperCAmelCase_=20, UpperCAmelCase_=1E-3):
        # NOTE(review): parameters were presumably num_train_timesteps,
        # beta_min, beta_max, sampling_eps; the three assignments below look
        # like mangled `self.sigmas / self.discrete_sigmas / self.timesteps
        # = None` initialisations and have no effect as written.
        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_=None):
        # set_timesteps: continuous times linearly spaced from 1 down to
        # config.sampling_eps on the requested device.
        lowerCAmelCase = torch.linspace(1, self.config.sampling_eps, UpperCAmelCase_, device=UpperCAmelCase_)

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_=None):
        # step_pred: advance sample x at time t one reverse-SDE step using the
        # model `score`; returns (x, x_mean) — with and without added noise.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        # log of the VP-SDE marginal mean coefficient at time t.
        lowerCAmelCase = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        lowerCAmelCase = std.flatten()
        # Broadcast std up to the score's rank before rescaling the score.
        while len(std.shape) < len(score.shape):
            lowerCAmelCase = std.unsqueeze(-1)
        lowerCAmelCase = -score / std

        # compute
        # Negative step size: time runs from 1 down to sampling_eps.
        lowerCAmelCase = -1.0 / len(self.timesteps)
        lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        lowerCAmelCase = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            lowerCAmelCase = beta_t.unsqueeze(-1)
        # Reverse-SDE drift and diffusion terms.
        lowerCAmelCase = -0.5 * beta_t * x
        lowerCAmelCase = torch.sqrt(UpperCAmelCase_)
        lowerCAmelCase = drift - diffusion**2 * score
        lowerCAmelCase = x + drift * dt

        # add noise
        lowerCAmelCase = randn_tensor(x.shape, layout=x.layout, generator=UpperCAmelCase_, device=x.device, dtype=x.dtype)
        lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        # Scheduler length == configured number of training timesteps.
        return self.config.num_train_timesteps
33
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
# NOTE(review): identifiers were machine-mangled; the base-class names and
# several distinct variables were collapsed (every `lowerCAmelCase =` below
# was a distinct assignment — local or `self.<attr>` — in the original), and
# the duplicated `UpperCAmelCase_` parameter names make the `def`s
# SyntaxErrors as written.  Tokens are left untouched.
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class __UpperCamelCase(__UpperCAmelCase, __UpperCAmelCase):
    """Variance-preserving (VP) SDE scheduler: one Euler–Maruyama step of the
    reverse-time SDE given a model score (presumably ScoreSdeVpScheduler —
    TODO confirm against the un-mangled original)."""

    # Solver order of the scheduler.
    __a: int = 1

    @register_to_config
    def __init__(self, UpperCAmelCase_=20_00, UpperCAmelCase_=0.1, UpperCAmelCase_=20, UpperCAmelCase_=1E-3):
        # NOTE(review): parameters were presumably num_train_timesteps,
        # beta_min, beta_max, sampling_eps; the three assignments below look
        # like mangled `self.sigmas / self.discrete_sigmas / self.timesteps
        # = None` initialisations and have no effect as written.
        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_=None):
        # set_timesteps: continuous times linearly spaced from 1 down to
        # config.sampling_eps on the requested device.
        lowerCAmelCase = torch.linspace(1, self.config.sampling_eps, UpperCAmelCase_, device=UpperCAmelCase_)

    def __snake_case(self, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_=None):
        # step_pred: advance sample x at time t one reverse-SDE step using the
        # model `score`; returns (x, x_mean) — with and without added noise.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        # log of the VP-SDE marginal mean coefficient at time t.
        lowerCAmelCase = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        lowerCAmelCase = std.flatten()
        # Broadcast std up to the score's rank before rescaling the score.
        while len(std.shape) < len(score.shape):
            lowerCAmelCase = std.unsqueeze(-1)
        lowerCAmelCase = -score / std

        # compute
        # Negative step size: time runs from 1 down to sampling_eps.
        lowerCAmelCase = -1.0 / len(self.timesteps)
        lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        lowerCAmelCase = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            lowerCAmelCase = beta_t.unsqueeze(-1)
        # Reverse-SDE drift and diffusion terms.
        lowerCAmelCase = -0.5 * beta_t * x
        lowerCAmelCase = torch.sqrt(UpperCAmelCase_)
        lowerCAmelCase = drift - diffusion**2 * score
        lowerCAmelCase = x + drift * dt

        # add noise
        lowerCAmelCase = randn_tensor(x.shape, layout=x.layout, generator=UpperCAmelCase_, device=x.device, dtype=x.dtype)
        lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        # Scheduler length == configured number of training timesteps.
        return self.config.num_train_timesteps
33
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """YAML SafeLoader that rejects mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable, so convert them to tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading YAML block and the remaining body.

    Returns `(yaml_block, rest)`, where `yaml_block` is the text between the
    opening and closing `---` fences (without the fences) or `None` when the
    README has no YAML header.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dict subclass holding the YAML metadata block of a dataset README."""

    # Fields whose YAML spelling uses dashes instead of underscores.
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML header of the README at `path`.

        Returns an empty instance when the README has no YAML block.
        """
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write (or update in place) the YAML metadata block of the README at `path`."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return the full README text with this metadata as the YAML header."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize this metadata back to YAML, restoring dashed field names."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task ids and their (currently empty) lists of sub-tasks.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
33
1
"""Training script: distil a masked/causal LM teacher into a smaller student."""

import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


# Maps a model type to its (config class, model class, tokenizer class) triple.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """Fail fast on incoherent command-line argument combinations.

    A few checks: the loss weights must match the chosen LM objective (MLM vs
    CLM), the student/teacher architectures must be compatible, and every
    referenced file must exist.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    """Freeze the student's positional embeddings so they are not trained."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    """Freeze the student's token-type embeddings (RoBERTa students only)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    """Parse arguments, build the student/teacher pair and run distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Smoothed inverse-frequency sampling weights for the MLM masking.
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
33
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Tests for the video-classification pipeline."""

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a top-2 video classifier plus example inputs (local file + URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example must yield exactly top_k=2 (score, label) dicts."""
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """Smoke-test a tiny VideoMAE model on a single video and on a batch."""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF video-classification models yet.
        pass
33
1
"""Convert an OpenAI GPT-2 TensorFlow checkpoint into a PyTorch model."""

import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """Load TF weights into a GPT-2 PyTorch model and save weights + config.

    Args:
        gpt2_checkpoint_path: path to the TensorFlow checkpoint.
        gpt2_config_file: optional JSON config; empty string means default config.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
33
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin

if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests of the ONNX Stable Diffusion img2img pipeline with each scheduler."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Build deterministic dummy inputs (128x128 image + fixed RNG)."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against real Stable Diffusion ONNX checkpoints."""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
33
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def UpperCAmelCase ( _snake_case ): lowerCAmelCase = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : List[Any] =StableDiffusionLatentUpscalePipeline __a : Optional[int] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { """height""", """width""", """cross_attention_kwargs""", """negative_prompt_embeds""", """prompt_embeds""", } __a : Dict =PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""} __a : Tuple =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __a : List[str] =frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a : Union[str, Any] =frozenset([] ) __a : Optional[int] =True @property def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = 4 lowerCAmelCase = (16, 16) lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase_ ) return image def __snake_case ( self ): torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( act_fn='''gelu''' , attention_head_dim=8 , 
norm_num_groups=UpperCAmelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=UpperCAmelCase_ , only_cross_attention=UpperCAmelCase_ , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) lowerCAmelCase = EulerDiscreteScheduler(prediction_type='''sample''' ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , ) lowerCAmelCase = CLIPTextModel(UpperCAmelCase_ ) lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', 
'''image''': self.dummy_image.cpu(), '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = '''cpu''' lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**UpperCAmelCase_ ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) lowerCAmelCase = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase_ , 1E-3 ) def __snake_case ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def __snake_case ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def __snake_case ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def __snake_case ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def __snake_case ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def __snake_case ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def __snake_case ( self ): lowerCAmelCase = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**UpperCAmelCase_ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=UpperCAmelCase_ ) 
pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ ) lowerCAmelCase = 2 lowerCAmelCase = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue lowerCAmelCase = getattr(UpperCAmelCase_ , scheduler_enum.name ) lowerCAmelCase = scheduler_cls.from_config(pipe.scheduler.config ) lowerCAmelCase = pipe(**UpperCAmelCase_ )[0] outputs.append(UpperCAmelCase_ ) assert check_same_shape(UpperCAmelCase_ ) @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self ): lowerCAmelCase = torch.manual_seed(33 ) lowerCAmelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) lowerCAmelCase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' lowerCAmelCase = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type='''latent''' ).images lowerCAmelCase = upscaler( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCAmelCase_ , output_type='''np''' , ).images[0] lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5E-2 def __snake_case ( self ): lowerCAmelCase = torch.manual_seed(33 ) lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) lowerCAmelCase = '''the temple of 
fire by Ross Tran and Gerardo Dottori, oil on canvas''' lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) lowerCAmelCase = upscaler( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCAmelCase_ , output_type='''np''' , ).images[0] lowerCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-2
33
"""Strip movement-pruning scaffolding ("bertarize") from a fine-pruned checkpoint.

Loads a fine-pruned ``pytorch_model.bin``, applies the binarized masks to the
weights according to the chosen pruning method, and saves a plain checkpoint
that no longer needs the emmental masking modules at inference time.
"""
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Apply the pruning masks to *args.model_name_or_path* and save the result.

    Args:
        args: parsed CLI namespace with ``pruning_method``, ``threshold``,
            ``model_name_or_path`` and ``target_model_path``.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings / LayerNorm / pooler are never pruned: copy verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            # Task heads are kept dense as well.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # Score tensors themselves are dropped from the output.
                    continue
                prefix_ = name[:-6]  # strip "weight" to locate the paired scores
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # NOTE(review): third argument is sigmoid=True in the upstream
                # script — confirm against emmental.ThresholdBinarizer.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    main(args)
33
1
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k): ways to choose *k* of *n* items.

    Raises:
        ValueError: if n < k or k < 0, since either would require the
            factorial of a negative number.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # n! / (k! * (n - k)!) — integer division is exact here.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
33
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } UpperCAmelCase_ ={ """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } UpperCAmelCase_ ={ """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def UpperCAmelCase ( _snake_case ): lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char lowerCAmelCase = set(_snake_case ) return pairs class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ): super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = merges_file lowerCAmelCase = {} lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 self.add_from_file(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in 
self.encoder.items()} with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self ): return len(self.encoder ) def __snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.cache: return self.cache[token] lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 
while i < len(UpperCAmelCase_ ): try: lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = new_word if len(UpperCAmelCase_ ) == 1: break else: lowerCAmelCase = get_pairs(UpperCAmelCase_ ) lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ ) lowerCAmelCase = word[:-4] lowerCAmelCase = word return word def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [] lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) ) return split_tokens def __snake_case ( self , UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.merges_file , UpperCAmelCase_ ) return 
out_vocab_file, out_merge_file def __snake_case ( self , UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): try: with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" ) return lowerCAmelCase = f.readlines() for lineTmp in lines: lowerCAmelCase = lineTmp.strip() lowerCAmelCase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase = line[:idx] lowerCAmelCase = len(self.encoder )
33
1
def min_path_sum(grid: list) -> int:
    """Return the minimal path sum from top-left to bottom-right of *grid*,
    moving only right or down.  The grid is overwritten with cumulative costs.

    Raises:
        TypeError: if the grid (or its first row) is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # First row is only reachable by moving right: prefix-sum it in place.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Fill *current_row* in place with minimal cumulative costs, given the
    already-filled *row_above*, and return it."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        # Cheapest way in: from the left, or from above.
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
from __future__ import annotations from typing import Generic, TypeVar UpperCAmelCase_ =TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = self lowerCAmelCase = 0 class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # map from node name to the node object lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # create a new set with x as its member lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): # find the set x belongs to (with path-compression) lowerCAmelCase = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase = nodea else: lowerCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # merge 2 disjoint sets self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) ) class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): # add an edge with the given weight self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase = weight lowerCAmelCase = weight def __snake_case ( self ): lowerCAmelCase = [] lowerCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) 
edges.sort(key=lambda UpperCAmelCase_ : x[2] ) # creating the disjoint set lowerCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(UpperCAmelCase_ ) # MST generation lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index] index += 1 lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ ) return graph
33
1
"""Stalebot for huggingface/diffusers: marks inactive open issues as stale,
closes them after continued inactivity, and reopens ones that get replies."""
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are exempt from auto-stale handling.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    """Scan open issues and apply the stale/close/reopen policy."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # NOTE(review): this membership test compares the string "stale"
            # against label objects — confirm it matches label names as intended.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
33
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Count ordered sequences of elements of *array* (reuse allowed) summing
    to *target*, by plain recursion (exponential time).

    *n* is the length of *array*; it is accepted for interface parity with
    the other implementations but not needed by the recursion.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Same count, memoised over the remaining target (O(n * target) time)."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Same count, iterative bottom-up dynamic programming."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: the empty sequence
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    array = [1, 2, 5]
    target = 5
    print(combination_sum_iv(n, array, target))
33
1
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =(CMStochasticIterativeScheduler,) __a : str =1_0 def __snake_case ( self , **UpperCAmelCase_ ): lowerCAmelCase = { '''num_train_timesteps''': 2_01, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } config.update(**UpperCAmelCase_ ) return config def __snake_case ( self ): lowerCAmelCase = 10 lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = self.scheduler_classes[0](**UpperCAmelCase_ ) scheduler.set_timesteps(UpperCAmelCase_ ) lowerCAmelCase = scheduler.timesteps[0] lowerCAmelCase = scheduler.timesteps[1] lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __snake_case ( self ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_ ) def __snake_case ( self ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase_ ) lowerCAmelCase = 1 scheduler.set_timesteps(UpperCAmelCase_ ) lowerCAmelCase = scheduler.timesteps lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCAmelCase_ ): # 1. scale model input lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) # 2. predict noise residual lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) # 3. 
predict previous sample x_t-1 lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase_ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 192.7614 ) < 1E-2 assert abs(result_mean.item() - 0.2510 ) < 1E-3 def __snake_case ( self ): lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase_ ) lowerCAmelCase = [1_06, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase_ ) lowerCAmelCase = scheduler.timesteps lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) # 2. predict noise residual lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) # 3. 
predict previous sample x_t-1 lowerCAmelCase = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase_ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase_ ) ) assert abs(result_sum.item() - 347.6357 ) < 1E-2 assert abs(result_mean.item() - 0.4527 ) < 1E-3 def __snake_case ( self ): lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase_ ) lowerCAmelCase = [39, 30, 12, 15, 0] with self.assertRaises(UpperCAmelCase_ , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase_ ) lowerCAmelCase = [39, 30, 12, 1, 0] lowerCAmelCase = len(UpperCAmelCase_ ) with self.assertRaises(UpperCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase_ ) lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
33
# Minimal DreamBooth inference script: load a fine-tuned Stable Diffusion
# checkpoint in half precision on the GPU, generate one image for a fixed
# prompt, and save it to disk.
import torch
from diffusers import StableDiffusionPipeline

# Path to the locally fine-tuned (DreamBooth) model directory.
model_id = "path-to-your-trained-model"

# fp16 weights keep GPU memory low; the pipeline must live on CUDA.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# "sks" is the rare-token identifier bound to the subject during fine-tuning.
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
33
1
def UpperCAmelCase(number: int) -> int:
    """Return the *number*-th Catalan number (1-indexed: 1, 1, 2, 5, 14, ...).

    Uses the recurrence C(i) = C(i-1) * (4i - 2) // (i + 1); the division is
    always exact, so the computation stays in integer arithmetic.

    Raises:
        TypeError: if *number* is not an integer.
        ValueError: if *number* is not positive.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import table: submodule name -> public symbols it provides.
# Fix: the mangled original bound this dict (and the modeling list below) to
# the throwaway name `UpperCAmelCase_`, so the `_import_structure` references
# raised NameError, and the `_LazyModule` result was never installed.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

# The modeling classes need torch; only register them when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy
    # module proxy below performs them on first attribute access.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
from __future__ import annotations from statistics import mean def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * no_of_processes lowerCAmelCase = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_snake_case ): lowerCAmelCase = burst_time[i] lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowerCAmelCase = [] lowerCAmelCase = -1 for i in range(_snake_case ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowerCAmelCase = i total_time += burst_time[target_process] completed += 1 lowerCAmelCase = 0 lowerCAmelCase = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * no_of_processes for i in range(_snake_case ): lowerCAmelCase = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") UpperCAmelCase_ =4 UpperCAmelCase_ =[2, 5, 3, 7] UpperCAmelCase_ =[0, 0, 0, 0] UpperCAmelCase_ =calculate_waitingtime(arrival_time, burst_time, no_of_processes) UpperCAmelCase_ =calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(F'''\nAverage waiting time = 
{mean(waiting_time):.5f}''') print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
33
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


# Test suite for RagRetriever against three index flavours: the canonical HF
# dataset index, a CustomHFIndex (in memory and from disk) and the legacy
# pickled index format.
#
# NOTE(review): identifiers look machine-mangled — every method is named
# `__snake_case`, every assignment target is `lowerCAmelCase`/`UpperCAmelCase_`,
# yet later code reads `self.tmpdirname`, `self.vocab_file`,
# `self.retrieval_vector_size`, `retriever`, `dataset`, etc. The originals
# were presumably `self.<attr> = ...` / named locals, and `np.floataa` a
# mangled `np.float32` — confirm against the upstream RAG retriever tests
# before relying on this file.
@require_faiss
class __UpperCamelCase ( __UpperCAmelCase ):
    '''simple docstring'''

    def __snake_case ( self ):
        # Fixture setup: build tiny DPR (WordPiece) and BART (BPE) tokenizer
        # files on disk so retrievers can be constructed offline.
        lowerCAmelCase = tempfile.mkdtemp()
        lowerCAmelCase = 8  # retrieval embedding dimensionality used throughout
        # DPR tok
        lowerCAmelCase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
        lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        lowerCAmelCase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
        lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
        lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase_ ) )

    def __snake_case ( self ):
        # DPR question-encoder tokenizer built from the fixture files.
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def __snake_case ( self ):
        # DPR context-encoder tokenizer built from the same fixture vocab.
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def __snake_case ( self ):
        # Generator-side BART tokenizer built from the fixture files.
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    def __snake_case ( self ):
        # Teardown: remove the temporary fixture directory.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self ):
        # Two-row dummy dataset with a flat faiss inner-product index; doc "1"
        # (all 2s) maximizes inner product with an all-ones query.
        lowerCAmelCase = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            }
        )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def __snake_case ( self ):
        # Canonical HF-index retriever; load_dataset is patched so no hub
        # access happens during the test.
        lowerCAmelCase = self.get_dummy_dataset()
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() ,
        )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            # NOTE(review): presumably `mock_load_dataset.return_value = dataset`
            # before mangling — confirm upstream.
            lowerCAmelCase = dataset
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_ ,
                question_encoder_tokenizer=self.get_dpr_tokenizer() ,
                generator_tokenizer=self.get_bart_tokenizer() ,
            )
        return retriever

    def __snake_case ( self , UpperCAmelCase_ ):
        # CustomHFIndex retriever; `from_disk` selects between an index
        # serialized to the temp dir and one handed over in memory.
        lowerCAmelCase = self.get_dummy_dataset()
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() ,
            index_name='''custom''' ,
        )
        if from_disk:
            lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' )
            lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_ ,
                question_encoder_tokenizer=self.get_dpr_tokenizer() ,
                generator_tokenizer=self.get_bart_tokenizer() ,
            )
        else:
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_ ,
                question_encoder_tokenizer=self.get_dpr_tokenizer() ,
                generator_tokenizer=self.get_bart_tokenizer() ,
                index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) ,
            )
        return retriever

    def __snake_case ( self ):
        # Legacy-index retriever: faiss index + pickled id list + pickled
        # passages file, as used by the original RAG release. Embedding size
        # is retrieval_vector_size + 1 to match the legacy layout.
        lowerCAmelCase = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            }
        )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) )
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() ,
            index_name='''legacy''' ,
            index_path=self.tmpdirname ,
        )
        lowerCAmelCase = RagRetriever(
            UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def __snake_case ( self ):
        # retrieve() against the canonical HF index: one opposite-sign query
        # per row should hit doc "1" then doc "0".
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
        )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip: save_pretrained/from_pretrained on the canonical
        # retriever, then a smoke retrieve.
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                lowerCAmelCase = self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # retrieve() against an in-memory CustomHFIndex.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
        )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save/load for the in-memory CustomHFIndex retriever.
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # retrieve() against a CustomHFIndex serialized to disk.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
        )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save/load for the on-disk CustomHFIndex retriever.
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # retrieve() against the legacy index; its doc dicts expose only
        # text/title (no id/embeddings columns).
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
        )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # Round-trip save/load for the legacy-index retriever.
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
            )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case ( self ):
        # __call__ path: numpy outputs by default, torch tensors when
        # return_tensors='pt' is requested.
        import torch

        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
        )
        lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
        lowerCAmelCase = retriever(
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            prefix=retriever.config.generator.prefix ,
            n_docs=UpperCAmelCase_ ,
            return_tensors='''pt''' ,
        )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case ( self ):
        # With a context-encoder tokenizer attached, the output dict grows to
        # six entries including the tokenized documents.
        lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ )
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa
        )
        lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
        self.assertEqual(
            len(UpperCAmelCase_ ) , 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_
        )  # check for doc token related keys in dictionary.
33
1
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
33
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ =logging.get_logger(__name__)

# Map of reference checkpoint -> hosted config file.
UpperCAmelCase_ ={
    """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}


# Configuration class for SwitchTransformers (T5-style encoder-decoder with
# mixture-of-experts sparse layers).
#
# NOTE(review): identifiers look machine-mangled — the three class attributes
# all bind `__a` (presumably `model_type`, `keys_to_ignore_at_inference` and
# `attribute_map` upstream), every `__init__` parameter is the duplicate name
# `UpperCAmelCase_` (a SyntaxError as written), and each `lowerCAmelCase = x`
# was presumably `self.x = x`. The `Any`/`Union`/`Dict` annotations are also
# used without imports. Confirm against the upstream
# SwitchTransformersConfig before relying on this file.
class __UpperCamelCase ( __UpperCAmelCase ):
    '''simple docstring'''

    __a : Any ="""switch_transformers"""
    __a : Union[str, Any] =["""past_key_values"""]
    __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
        # Core transformer sizes.
        lowerCAmelCase = vocab_size
        lowerCAmelCase = d_model
        lowerCAmelCase = d_kv
        lowerCAmelCase = d_ff
        lowerCAmelCase = num_sparse_encoder_layers
        lowerCAmelCase = num_layers
        lowerCAmelCase = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        lowerCAmelCase = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
        else:
            lowerCAmelCase = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            lowerCAmelCase = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        # Attention / mixture-of-experts router settings.
        lowerCAmelCase = num_heads
        lowerCAmelCase = num_experts
        lowerCAmelCase = expert_capacity
        lowerCAmelCase = router_bias
        lowerCAmelCase = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        lowerCAmelCase = router_dtype
        lowerCAmelCase = router_ignore_padding_tokens
        lowerCAmelCase = relative_attention_num_buckets
        lowerCAmelCase = relative_attention_max_distance
        lowerCAmelCase = dropout_rate
        lowerCAmelCase = layer_norm_epsilon
        lowerCAmelCase = initializer_factor
        lowerCAmelCase = feed_forward_proj
        lowerCAmelCase = use_cache
        lowerCAmelCase = add_router_probs
        lowerCAmelCase = router_z_loss_coef
        lowerCAmelCase = router_aux_loss_coef
        # Parse the activation spec, e.g. "relu" or "gated-gelu".
        lowerCAmelCase = self.feed_forward_proj.split('''-''' )
        lowerCAmelCase = act_info[-1]
        lowerCAmelCase = act_info[0] == '''gated'''
        # NOTE(review): `and` binds tighter than `or` here, so the condition
        # is ((len > 1 and not gated) or len > 2) — matches the upstream code.
        if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\''''
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowerCAmelCase = '''gelu_new'''
        super().__init__(
            pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
1
# Conversion script: port fairseq wav2vec2 checkpoints (pretrained,
# CTC-finetuned, or sequence-classification) to HF Transformers format.
#
# NOTE(review): identifiers look machine-mangled — every function is named
# `UpperCAmelCase` while call sites reference `read_txt_into_dict`,
# `set_recursively`, `rename_dict`, `load_wavaveca_layer`,
# `recursively_load_weights`, `load_conv_layer` and
# `convert_wavaveca_checkpoint`, and each `lowerCAmelCase = x` presumably
# bound a named local/attribute upstream. As written the module raises
# NameError; confirm against the upstream
# convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py before use.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConfig,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaForCTC,
    WavaVecaForPreTraining,
    WavaVecaProcessor,
    logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification


logging.set_verbosity_info()
UpperCAmelCase_ =logging.get_logger(__name__)

# fairseq parameter name fragment -> HF module path ('*' = layer index).
UpperCAmelCase_ ={
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """adapter_layer""": """encoder.layers.*.adapter_layer""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
    """pooling_layer.linear""": """projector""",
    """pooling_layer.projection""": """classifier""",
}

# HF modules that live at the model root (no "wav2vec2." prefix).
UpperCAmelCase_ =[
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """projector""",
    """classifier""",
]


def UpperCAmelCase ( _snake_case ):
    # Read a label file: line index -> first word on the line
    # (used as id2label for sequence classification).
    lowerCAmelCase = {}
    with open(_snake_case , '''r''' ) as file:
        for line_number, line in enumerate(_snake_case ):
            lowerCAmelCase = line.strip()
            if line:
                lowerCAmelCase = line.split()
                lowerCAmelCase = line_number
                lowerCAmelCase = words[0]
                lowerCAmelCase = value
    return result


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
    # Set one tensor on the HF model in place, after checking that the
    # source and destination shapes agree; handles weight/weight_g/
    # weight_v/bias and adapter "param" entries.
    for attribute in key.split('''.''' ):
        lowerCAmelCase = getattr(_snake_case , _snake_case )
    lowerCAmelCase = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_snake_case ):
            lowerCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            lowerCAmelCase = '''param'''
    if weight_type is not None and weight_type != "param":
        lowerCAmelCase = getattr(_snake_case , _snake_case ).shape
    elif weight_type is not None and weight_type == "param":
        lowerCAmelCase = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            lowerCAmelCase = getattr(_snake_case , _snake_case )
        lowerCAmelCase = shape_pointer.shape
        # let's reduce dimension
        lowerCAmelCase = value[0]
    else:
        lowerCAmelCase = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}"""
        )
    if weight_type == "weight":
        lowerCAmelCase = value
    elif weight_type == "weight_g":
        lowerCAmelCase = value
    elif weight_type == "weight_v":
        lowerCAmelCase = value
    elif weight_type == "bias":
        lowerCAmelCase = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            lowerCAmelCase = getattr(_snake_case , _snake_case )
        lowerCAmelCase = value
    else:
        lowerCAmelCase = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
    # Dict-based variant of the setter: record the renamed key -> tensor
    # pair instead of mutating a model in place.
    lowerCAmelCase = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_snake_case ):
            lowerCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            lowerCAmelCase = '''param'''
    if weight_type is not None and weight_type != "param":
        lowerCAmelCase = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        lowerCAmelCase = '''.'''.join([key, hf_param_name] )
    else:
        lowerCAmelCase = key
    lowerCAmelCase = value if '''lm_head''' in full_key else value[0]


# Adapter parameter fragment -> HF adapter submodule path.
UpperCAmelCase_ ={
    """W_a""": """linear_1.weight""",
    """W_b""": """linear_2.weight""",
    """b_a""": """linear_1.bias""",
    """b_b""": """linear_2.bias""",
    """ln_W""": """norm.weight""",
    """ln_b""": """norm.bias""",
}


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=None , _snake_case=None ):
    # Match one fairseq tensor against MAPPING and dispatch it either into
    # hf_dict (dict mode) or onto the model (recursive-set mode).
    # Returns whether the tensor was consumed.
    lowerCAmelCase = False
    for key, mapped_key in MAPPING.items():
        lowerCAmelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            lowerCAmelCase = True
            if "*" in mapped_key:
                lowerCAmelCase = name.split(_snake_case )[0].split('''.''' )[-2]
                lowerCAmelCase = mapped_key.replace('''*''' , _snake_case )
            if "weight_g" in name:
                lowerCAmelCase = '''weight_g'''
            elif "weight_v" in name:
                lowerCAmelCase = '''weight_v'''
            elif "bias" in name:
                lowerCAmelCase = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                lowerCAmelCase = '''weight'''
            else:
                lowerCAmelCase = None
            if hf_dict is not None:
                rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
            else:
                set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
            return is_used
    return is_used


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
    # Walk the whole fairseq state dict: conv feature-extractor tensors go
    # through the conv loader, everything else through the layer matcher;
    # anything unmatched is reported at the end.
    lowerCAmelCase = []
    lowerCAmelCase = fairseq_model.state_dict()
    lowerCAmelCase = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        lowerCAmelCase = False
        if "conv_layers" in name:
            load_conv_layer(
                _snake_case ,
                _snake_case ,
                _snake_case ,
                _snake_case ,
                hf_model.config.feat_extract_norm == '''group''' ,
            )
            lowerCAmelCase = True
        else:
            lowerCAmelCase = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
        if not is_used:
            unused_weights.append(_snake_case )
    logger.warning(F"""Unused weights: {unused_weights}""" )


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
    # Copy one conv feature-extractor tensor (type_id 0 = conv weight/bias,
    # type_id 2 = layer/group norm) after a shape check.
    lowerCAmelCase = full_name.split('''conv_layers.''' )[-1]
    lowerCAmelCase = name.split('''.''' )
    lowerCAmelCase = int(items[0] )
    lowerCAmelCase = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            lowerCAmelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            lowerCAmelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            lowerCAmelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            lowerCAmelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(_snake_case )


@torch.no_grad()
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case=False ):
    # Entry point: build the right HF model class (seq-class / CTC /
    # pretraining), load the fairseq checkpoint, port the weights across
    # and save everything to the dump folder.
    if config_path is not None:
        lowerCAmelCase = WavaVecaConfig.from_pretrained(_snake_case )
    else:
        lowerCAmelCase = WavaVecaConfig()
    if is_seq_class:
        lowerCAmelCase = read_txt_into_dict(_snake_case )
        lowerCAmelCase = idalabel
        lowerCAmelCase = WavaVecaForSequenceClassification(_snake_case )
        lowerCAmelCase = WavaVecaFeatureExtractor(
            feature_size=1 ,
            sampling_rate=16000 ,
            padding_value=0 ,
            do_normalize=_snake_case ,
            return_attention_mask=_snake_case ,
        )
        feature_extractor.save_pretrained(_snake_case )
    elif is_finetuned:
        if dict_path:
            lowerCAmelCase = Dictionary.load(_snake_case )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowerCAmelCase = target_dict.pad_index
            lowerCAmelCase = target_dict.bos_index
            lowerCAmelCase = target_dict.eos_index
            lowerCAmelCase = len(target_dict.symbols )
            lowerCAmelCase = os.path.join(_snake_case , '''vocab.json''' )
            if not os.path.isdir(_snake_case ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
                return
            os.makedirs(_snake_case , exist_ok=_snake_case )
            lowerCAmelCase = target_dict.indices
            # fairseq has the <pad> and <s> switched
            lowerCAmelCase = 0
            lowerCAmelCase = 1
            with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(_snake_case , _snake_case )
            lowerCAmelCase = WavaVecaCTCTokenizer(
                _snake_case ,
                unk_token=target_dict.unk_word ,
                pad_token=target_dict.pad_word ,
                bos_token=target_dict.bos_word ,
                eos_token=target_dict.eos_word ,
                word_delimiter_token='''|''' ,
                do_lower_case=_snake_case ,
            )
            lowerCAmelCase = True if config.feat_extract_norm == '''layer''' else False
            lowerCAmelCase = WavaVecaFeatureExtractor(
                feature_size=1 ,
                sampling_rate=16000 ,
                padding_value=0 ,
                do_normalize=_snake_case ,
                return_attention_mask=_snake_case ,
            )
            lowerCAmelCase = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
            processor.save_pretrained(_snake_case )
        lowerCAmelCase = WavaVecaForCTC(_snake_case )
    else:
        lowerCAmelCase = WavaVecaForPreTraining(_snake_case )
    if is_finetuned or is_seq_class:
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )}
        )
    else:
        lowerCAmelCase = argparse.Namespace(task='''audio_pretraining''' )
        lowerCAmelCase = fairseq.tasks.setup_task(_snake_case )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
    lowerCAmelCase = model[0].eval()
    recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
    hf_wavavec.save_pretrained(_snake_case )


if __name__ == "__main__":
    UpperCAmelCase_ =argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    parser.add_argument(
        """--is_seq_class""",
        action="""store_true""",
        help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
    )
    UpperCAmelCase_ =parser.parse_args()
    UpperCAmelCase_ =not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
33
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. 
Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
1
# Tests for the ONNX Stable Diffusion image-to-image pipeline.
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImgaImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): this block appears to be the output of an identifier-mangling
    # tool — every attribute is `__a`, every method is `__snake_case` (so later
    # definitions silently overwrite earlier ones), locals are `lowerCAmelCase`
    # while later statements read the intended names (`image`, `pipe`, `inputs`,
    # `self.hub_checkpoint`, ...), and the base class `__UpperCAmelCase` is
    # undefined.  It cannot run as-is; restore the original names before use.

    # Intended to be the `hub_checkpoint` attribute read by the tests below.
    __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    # Builds deterministic dummy pipeline inputs (128x128 image, fixed RNG seed).
    def __snake_case ( self , UpperCAmelCase_=0 ):
        lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) )
        lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ )
        lowerCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    # Default pipeline: checks a 3x3 corner slice against reference values.
    def __snake_case ( self ):
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    # Same check with a PNDM scheduler (skip_prk_steps enabled).
    def __snake_case ( self ):
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    # Same check with an LMS discrete scheduler (includes a warmup pass).
    def __snake_case ( self ):
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        # warmup pass to apply optimizations
        lowerCAmelCase = pipe(**self.get_dummy_inputs() )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    # Same check with an Euler discrete scheduler.
    def __snake_case ( self ):
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    # Same check with an Euler-ancestral discrete scheduler.
    def __snake_case ( self ):
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    # Same check with a DPM-Solver multistep scheduler.
    def __snake_case ( self ):
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = self.get_dummy_inputs()
        lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
        lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1


@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): same mangling caveat as above — properties collide on the
    # name `__snake_case` while the tests read `self.gpu_provider` and
    # `self.gpu_options`; restore the original names before use.

    # ONNX Runtime CUDA provider configuration used by the nightly tests.
    @property
    def __snake_case ( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    # ONNX Runtime session options (memory-pattern optimization disabled).
    @property
    def __snake_case ( self ):
        lowerCAmelCase = ort.SessionOptions()
        lowerCAmelCase = False
        return options

    # End-to-end img2img run against the real SD v1.4 ONNX weights (default PNDM).
    def __snake_case ( self ):
        lowerCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase = np.random.RandomState(0 )
        lowerCAmelCase = pipe(
            prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , )
        lowerCAmelCase = output.images
        lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    # End-to-end img2img run with an LMS scheduler against SD v1.5 ONNX weights.
    def __snake_case ( self ):
        lowerCAmelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase = init_image.resize((7_68, 5_12) )
        lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase = np.random.RandomState(0 )
        lowerCAmelCase = pipe(
            prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , )
        lowerCAmelCase = output.images
        lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
# Tests for the DeepFloyd IF inpainting super-resolution pipeline.
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): identifier mangling — the four class attributes all share the
    # name `__a` (later ones overwrite earlier), all methods share `__snake_case`,
    # and the two `__UpperCAmelCase` bases are undefined (presumably
    # PipelineTesterMixin and IFPipelineTesterMixin, imported above — confirm).
    # Restore the original names before use.
    __a : Tuple =IFInpaintingSuperResolutionPipeline
    __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
    __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}

    # Delegates dummy-component construction to the IF tester mixin.
    def __snake_case ( self ):
        return self._get_superresolution_dummy_components()

    # Builds deterministic dummy inputs (16x16 image, 32x32 original + mask).
    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
        if str(UpperCAmelCase_ ).startswith('''mps''' ):
            lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
        else:
            lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    # xformers attention determinism check (CUDA + xformers only).
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def __snake_case ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    # Save/load round-trip with optional components removed.
    def __snake_case ( self ):
        self._test_save_load_optional_components()

    # fp16 save/load round-trip (CUDA only).
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def __snake_case ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    # Attention-slicing output equivalence.
    def __snake_case ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    # Local save/load round-trip.
    def __snake_case ( self ):
        self._test_save_load_local()

    # Batched vs. single inference equivalence.
    def __snake_case ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
33
1
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """A node of a segment tree covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val  # aggregate of the covered range under the tree's function
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over *collection*, aggregating with the binary *function*.

    Supports point updates and inclusive range queries in O(log n).

    Fixes vs. the previous revision: identifier mangling had renamed both
    classes to one colliding name and every method to ``__snake_case``
    (so only the last definition survived), while the bodies and the demo
    still called ``self._build_tree``, ``self._update_tree``,
    ``self._query_range``, ``arr.update`` etc. — the original names are
    restored from those call sites.
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set element *i* to *val*, refreshing aggregates along the path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the fn-aggregate of elements in the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: a single element of the collection.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this node's aggregate from the updated children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in left child tree
                return self._query_range(node.left, i, j)
            # range spans left and right child trees
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in right child tree
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
33
# Lazy import structure for the EfficientFormer model (transformers-style
# `__init__.py`).  Fix vs. the previous revision: the import structure and the
# optional sub-lists were all assigned to a mangled throwaway name, while
# `_LazyModule(...)` still referenced `_import_structure` — a NameError at
# import time.  The conventional pattern is restored below.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# The configuration is importable unconditionally.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Image processor: only when vision dependencies are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

# PyTorch models: only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

# TensorFlow models: only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
# torch.hub entry points for transformers (hubconf.py lives at the repo root).
# Fixes vs. the previous revision: `SRC_DIR` was referenced by
# `sys.path.append` but assigned to a mangled throwaway name, and all seven
# entry-point functions shared a single mangled name so each definition
# overwrote the previous one.
import os
import sys


# Make the in-repo `src/` layout importable before importing transformers.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# torch.hub reads this module-level list to check/install prerequisites.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


# NOTE(review): the entry-point names below follow transformers' upstream
# hubconf.py convention (config/tokenizer/model/modelFor*) — confirm against
# upstream before relying on them externally.
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Hub entry point wrapping ``AutoConfig.from_pretrained``."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Hub entry point wrapping ``AutoTokenizer.from_pretrained``."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Hub entry point wrapping ``AutoModel.from_pretrained``."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForCausalLM.from_pretrained``."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForMaskedLM.from_pretrained``."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForSequenceClassification.from_pretrained``."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForQuestionAnswering.from_pretrained``."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
33
# `datasets` ArrowBasedBuilder for JSON / JSON-Lines files.
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


# NOTE(review): mangled — the methods below read `logger`, but this assignment
# targets a throwaway name; restore `logger = ...` before use.
UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    '''simple docstring'''

    # NOTE(review): identifier mangling — all eight fields share the name `__a`
    # (only the last survives), yet the builder reads `self.config.features`,
    # `.encoding`, `.encoding_errors`, `.field`, `.use_threads`, `.block_size`,
    # `.chunksize`, `.newlines_in_values` — the field names must be restored
    # (in this order, judging by the types/defaults) before this can run.
    __a : Optional[datasets.Features] =None
    __a : str ="utf-8"
    __a : Optional[str] =None
    __a : Optional[str] =None
    __a : bool =True  # deprecated
    __a : Optional[int] =None  # deprecated
    __a : int =1_0 << 2_0  # 10MB
    __a : Optional[bool] =None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    # NOTE(review): presumably `BUILDER_CONFIG_CLASS`; also note `JsonConfig`
    # is not defined under that name above (the dataclass's name was mangled).
    __a : str =JsonConfig

    # Dataset info: warns on deprecated config options and rejects removed ones.
    def __snake_case ( self ):
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            lowerCAmelCase = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    # Split generators: downloads data files and builds one generator per split
    # (or a single TRAIN split when data_files is a plain list).
    def __snake_case ( self , UpperCAmelCase_ ):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
            lowerCAmelCase = data_files
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
        return splits

    # Casts a pyarrow table to the configured features, adding missing columns
    # as all-null arrays first.
    def __snake_case ( self , UpperCAmelCase_ ):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
                lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
        return pa_table

    # Streams pyarrow tables out of the JSON files: either from one field of a
    # single JSON document, or chunk-by-chunk from JSON-Lines input with
    # adaptive block sizing and a whole-document fallback on parse failure.
    def __snake_case ( self , UpperCAmelCase_ ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                # We keep only the field we are interested in
                lowerCAmelCase = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase_ , (list, tuple) ):
                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                else:
                    lowerCAmelCase = dataset
                lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                yield file_idx, self._cast_table(UpperCAmelCase_ )
            # If the file has one json object per line
            else:
                with open(UpperCAmelCase_ , '''rb''' ) as f:
                    lowerCAmelCase = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
                    lowerCAmelCase = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        lowerCAmelCase = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    lowerCAmelCase = paj.read_json(
                                        io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
                                        and "straddling" not in str(UpperCAmelCase_ )
                                        or block_size > len(UpperCAmelCase_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):  # list is the only sequence type supported in JSON
                                try:
                                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                                    lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(UpperCAmelCase_ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
                        batch_idx += 1
1
# Tests for transformers' optimizers (AdamW, Adafactor) and LR schedulers.
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


# NOTE(review): identifier mangling throughout this block — the two helpers
# below share the name `UpperCAmelCase` (the second overwrites the first, yet
# tests call both `unwrap_schedule` and `unwrap_and_save_reload_schedule`),
# locals are assigned to `lowerCAmelCase` but read under their intended names
# (`lrs`, `scheduler`, `num_steps`, ...), and every test method is
# `__snake_case`.  Restore the original names before use.

# Intended as `unwrap_schedule`: step the scheduler `num_steps` times,
# recording the learning rate before each step.
def UpperCAmelCase ( _snake_case , _snake_case=10 ):
    lowerCAmelCase = []
    for _ in range(_snake_case ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs


# Intended as `unwrap_and_save_reload_schedule`: same, but round-trips the
# scheduler state through `torch.save`/`load_state_dict` at the midpoint to
# test picklability/resumability.
def UpperCAmelCase ( _snake_case , _snake_case=10 ):
    lowerCAmelCase = []
    for step in range(_snake_case ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowerCAmelCase = os.path.join(_snake_case , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , _snake_case )
                lowerCAmelCase = torch.load(_snake_case )
                scheduler.load_state_dict(_snake_case )
    return lrs


@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # Elementwise almost-equal assertion for two lists of floats.
    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
        self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
        for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
            self.assertAlmostEqual(UpperCAmelCase_ , UpperCAmelCase_ , delta=UpperCAmelCase_ )

    # AdamW: 100 steps of MSE minimization should converge w to the target.
    def __snake_case ( self ):
        lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase_ )
        lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
        lowerCAmelCase = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        lowerCAmelCase = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(1_00 ):
            lowerCAmelCase = criterion(UpperCAmelCase_ , UpperCAmelCase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )

    # Adafactor: 1000 steps with fixed LR should also converge to the target.
    def __snake_case ( self ):
        lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase_ )
        lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
        lowerCAmelCase = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        lowerCAmelCase = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase_ , weight_decay=0.0 , relative_step=UpperCAmelCase_ , scale_parameter=UpperCAmelCase_ , warmup_init=UpperCAmelCase_ , )
        for _ in range(10_00 ):
            lowerCAmelCase = criterion(UpperCAmelCase_ , UpperCAmelCase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )


@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): the three attributes collide on `__a`; presumably `m`,
    # `optimizer`, and `num_steps` (the methods read self.optimizer and
    # self.num_steps) — confirm against upstream.
    __a : Optional[Any] =nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
    __a : Union[str, Any] =AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    __a : Any =1_0

    # Elementwise almost-equal assertion with an optional failure message.
    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
        self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
        for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
            self.assertAlmostEqual(UpperCAmelCase_ , UpperCAmelCase_ , delta=UpperCAmelCase_ , msg=UpperCAmelCase_ )

    # For every scheduler factory: check the produced LR sequence against the
    # expected values, then check the save/reload round-trip reproduces it.
    def __snake_case ( self ):
        lowerCAmelCase = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        lowerCAmelCase = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {'''num_warmup_steps''': 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, '''num_cycles''': 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {'''num_warmup_steps''': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            lowerCAmelCase , lowerCAmelCase = data
            lowerCAmelCase = scheduler_func(self.optimizer , **UpperCAmelCase_ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lowerCAmelCase = unwrap_schedule(UpperCAmelCase_ , self.num_steps )
            self.assertListAlmostEqual(
                UpperCAmelCase_ , UpperCAmelCase_ , tol=1E-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , )
            lowerCAmelCase = scheduler_func(self.optimizer , **UpperCAmelCase_ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase_ )  # wrap to test picklability of the schedule
            lowerCAmelCase = unwrap_and_save_reload_schedule(UpperCAmelCase_ , self.num_steps )
            self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ , msg=F"""failed for {scheduler_func} in save and reload""" )


class __UpperCamelCase :
    '''simple docstring'''

    # Picklable callable wrapper around an LR lambda (plain lambdas can't be
    # pickled by torch.save).
    def __init__( self , UpperCAmelCase_ ):
        lowerCAmelCase = fn

    def __call__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
        return self.fn(*UpperCAmelCase_ , **UpperCAmelCase_ )

    # Wraps every lr_lambda of a LambdaLR scheduler in this class.
    # NOTE(review): the result is assigned to a mangled throwaway name —
    # presumably `scheduler.lr_lambdas = ...` upstream; confirm.
    @classmethod
    def __snake_case ( self , UpperCAmelCase_ ):
        lowerCAmelCase = list(map(self , scheduler.lr_lambdas ) )
33
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration class for the MaskFormer-Swin backbone.

    Stores the hyper-parameters of a Swin-style vision backbone (patch
    embedding size, per-stage depths/heads, attention window, dropout rates)
    and exposes the backbone bookkeeping (`stage_names`, aligned
    `out_features`/`out_indices`) expected by `BackboneConfigMixin`.

    NOTE(review): the obfuscated original declared the same base class twice,
    gave every `__init__` parameter the same name (a SyntaxError) and dropped
    the `self.` prefixes on all attribute assignments; the names below are
    restored from the body's right-hand sides and the module's imports.
    """

    model_type = "maskformer-swin"

    # Map the generic attribute names used across the library onto the
    # Swin-specific names stored on this config.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # treated as read-only; never mutated in place
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One "layer" per stage; `num_hidden_layers` aliases this via attribute_map.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # We set the hidden_size attribute in order to make Swin work with
        # VisionEncoderDecoderModel; this is the channel dimension after the
        # last stage of the model (channels double at each of the later stages).
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
33
1
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    """Tests for `BarkProcessor`: tokenization plus speaker-embedding (voice preset) handling.

    NOTE(review): the obfuscated original named every method `__snake_case`
    (so only the last definition survived and unittest discovered nothing)
    and dropped the `self.` prefixes in `setUp`; both are restored here.
    """

    def setUp(self):
        # Attributes read by every test method below.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the tokenizer of the test checkpoint, forwarding any kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # A processor saved and reloaded from disk keeps the same tokenizer vocab.
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        # Extra tokenizer kwargs passed at load time must survive a save/load cycle.
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from the hub (smoke test: must not raise)
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        # The processor's text path must match calling the tokenizer directly.
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the boolean kwargs were obfuscated in the original;
        # these values mirror BarkProcessor's internal tokenizer call — confirm.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
33
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of any contiguous subarray of `arr` (Kadane's algorithm).

    NOTE(review): the obfuscated original declared both parameters with the
    same name (a SyntaxError) while the body and the `__main__` caller used
    `arr` / `allow_empty_subarrays` / `max_subarray_sum`; those names are
    restored here.

    :param arr: sequence of numbers to scan.
    :param allow_empty_subarrays: when True, the empty subarray (sum 0) is a
        valid answer, so the result is never negative.
    :return: the best subarray sum; 0 for an empty input (even when
        `allow_empty_subarrays` is False, matching the original behavior).

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    >>> max_subarray_sum([])
    0
    >>> max_subarray_sum([-1, -2], allow_empty_subarrays=True)
    0
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it: from the empty
        # subarray (sum 0) when allowed, otherwise from `num` alone.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
33
1
# Full unit name -> SI symbol, so callers may pass either form.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter): symbol -> power of ten relative to a meter.
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between metric length units.

    Units may be given as full names ("kilometer"), plurals ("kilometers")
    or SI symbols ("km").

    NOTE(review): the obfuscated original assigned BOTH lookup tables to the
    same module name and declared three identical parameter names (a
    SyntaxError) while the body read `UNIT_SYMBOL` / `METRIC_CONVERSION` /
    `from_type` / `to_type` / `value`; those names are restored here.

    :raises ValueError: if either unit is not a known metric length unit.

    >>> length_conversion(4, "meter", "kilometer")
    0.004
    >>> length_conversion(1, "kilometer", "meter")
    1000
    """
    # Lower-case and drop a plural 's', then map full names to their symbol;
    # unknown strings fall through unchanged and fail the membership check.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Positive when converting to a smaller unit, negative to a larger one.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
33
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) 
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', 
'''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) 
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def 
__snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = 
JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] 
) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = 
self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) 
class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
1
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """Deprecated alias of `PerceiverImageProcessor`, kept for backward compatibility.

    NOTE(review): the obfuscated original gave `*args` and `**kwargs` the same
    name (a SyntaxError) and passed that name as the `warnings.warn` category;
    the base class is restored from the otherwise-unused import and the class
    name from its own deprecation message.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every construction, then defer entirely to the new class.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
33
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. 
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
1
# NOTE(review): preprocessing script that flattens the Natural Questions
# dataset into strided, tokenized chunks for BigBird QA training.
# Obfuscation damage kept byte-identical below: all four top-level functions
# share the name `UpperCAmelCase` (each def overwrites the previous, so the
# later helper calls `_get_single_answer`, `get_context_and_ans`,
# `get_strided_contexts_and_ans`, `prepare_inputs`, `save_to_disk` are
# unresolved); `choose_first` declares duplicate parameters (SyntaxError);
# assignment targets were collapsed to `lowerCAmelCase` so reads such as
# `answer`, `out`, `input_ids`, `q_len` no longer match a binding.
import os

import jsonlines
import numpy as np
from tqdm import tqdm

UpperCAmelCase_ =2048          # doc stride (token overlap between windows)
UpperCAmelCase_ =4096          # max sequence length per window
UpperCAmelCase_ =42            # RNG seed for null-sample subsampling
UpperCAmelCase_ =os.environ.pop("""PROCESS_TRAIN""", """false""")
UpperCAmelCase_ ={"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}


def UpperCAmelCase ( _snake_case ):
    # Collapse one NQ example's annotations into a single answer dict
    # ({start/end token, start/end byte, text, category}).
    def choose_first(_snake_case , _snake_case=False ):
        # NOTE(review): duplicate parameter names — SyntaxError as written;
        # presumably (answer, is_long_answer) upstream.
        assert isinstance(_snake_case , _snake_case )
        if len(_snake_case ) == 1:
            lowerCAmelCase = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                lowerCAmelCase = {k: [a[k]] for k in a}
            if len(a['''start_token'''] ) > 0:
                break
        return a

    lowerCAmelCase = {'''id''': example['''id''']}
    lowerCAmelCase = example['''annotations''']
    lowerCAmelCase = annotation['''yes_no_answer''']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        # Yes/no questions carry no span; mark with the <cls> sentinel text.
        lowerCAmelCase = ['''yes'''] if 1 in yes_no_answer else ['''no''']
        lowerCAmelCase = lowerCAmelCase = []
        lowerCAmelCase = lowerCAmelCase = []
        lowerCAmelCase = ['''<cls>''']
    else:
        lowerCAmelCase = ['''short''']
        lowerCAmelCase = choose_first(annotation['''short_answers'''] )
        if len(out['''start_token'''] ) == 0:
            # answer will be long if short is not available
            lowerCAmelCase = ['''long''']
            lowerCAmelCase = choose_first(annotation['''long_answer'''] , is_long_answer=_snake_case )
            lowerCAmelCase = []
        answer.update(_snake_case )
    # disregard some samples
    if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
        lowerCAmelCase = True
    else:
        lowerCAmelCase = False
    lowerCAmelCase = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
    if not all(isinstance(answer[k] , _snake_case ) for k in cols ):
        raise ValueError('''Issue in ID''' , example['''id'''] )
    return answer


def UpperCAmelCase ( _snake_case , _snake_case=False ):
    # Strip HTML tokens from the document, remapping the answer span so its
    # token indices stay valid in the cleaned context.
    lowerCAmelCase = _get_single_answer(_snake_case )
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        lowerCAmelCase = example['''document''']['''tokens''']
        lowerCAmelCase = []
        for i in range(len(doc['''token'''] ) ):
            if not doc["is_html"][i]:
                context.append(doc['''token'''][i] )
        return {
            "context": " ".join(_snake_case ),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    lowerCAmelCase = ['''start_token''', '''end_token''']
    answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} )  # e.g. [10] == 10
    lowerCAmelCase = example['''document''']['''tokens''']
    lowerCAmelCase = answer['''start_token''']
    lowerCAmelCase = answer['''end_token''']
    lowerCAmelCase = []
    for i in range(len(doc['''token'''] ) ):
        if not doc["is_html"][i]:
            context.append(doc['''token'''][i] )
        else:
            # Every HTML token dropped before the span shifts it left by one.
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    lowerCAmelCase = ''' '''.join(context[start_token:end_token] )
    # checking above code
    if assertion:
        lowerCAmelCase = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
        lowerCAmelCase = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
        lowerCAmelCase = ''' '''.join([old[i] for i in range(len(_snake_case ) ) if not is_html[i]] )
        if new != old:
            print('''ID:''' , example['''id'''] )
            print('''New:''' , _snake_case , end='''\n''' )
            print('''Old:''' , _snake_case , end='''\n\n''' )
    return {
        "context": " ".join(_snake_case ),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=2048 , _snake_case=4096 , _snake_case=True ):
    # Tokenize question+context and split the result into overlapping windows
    # of `max_length`, producing per-window start/end/category labels.
    # overlap will be of doc_stride - q_len
    lowerCAmelCase = get_context_and_ans(_snake_case , assertion=_snake_case )
    lowerCAmelCase = out['''answer''']
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    lowerCAmelCase = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
    lowerCAmelCase = input_ids.index(tokenizer.sep_token_id ) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        lowerCAmelCase = []
        lowerCAmelCase = []
        lowerCAmelCase = input_ids[:q_len]
        lowerCAmelCase = range(_snake_case , len(_snake_case ) , max_length - doc_stride )
        for i in doc_start_indices:
            lowerCAmelCase = i + max_length - q_len
            lowerCAmelCase = input_ids[i:end_index]
            inputs.append(q_indices + slice )
            category.append(answer['''category'''][0] )
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(_snake_case ),
                "end_token": [-100] * len(_snake_case ),
                "category": category,
            },
        }
    # Word-level span -> token-level span: count subword tokens before the
    # span's first and last words.
    lowerCAmelCase = out['''context'''].split()
    lowerCAmelCase = splitted_context[answer['''end_token''']]
    lowerCAmelCase = len(
        tokenizer(
            ''' '''.join(splitted_context[: answer['''start_token''']] ) ,
            add_special_tokens=_snake_case , ).input_ids )
    lowerCAmelCase = len(
        tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=_snake_case ).input_ids )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    lowerCAmelCase = len(tokenizer(_snake_case , add_special_tokens=_snake_case ).input_ids )
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    lowerCAmelCase = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1]  # right & left are inclusive
    lowerCAmelCase = answer['''start_token''']
    lowerCAmelCase = answer['''end_token''']
    if assertion:
        lowerCAmelCase = tokenizer.decode(_snake_case )
        if answer["span"] != new:
            print('''ISSUE IN TOKENIZATION''' )
            print('''OLD:''' , answer['''span'''] )
            print('''NEW:''' , _snake_case , end='''\n\n''' )
    if len(_snake_case ) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    lowerCAmelCase = input_ids[:q_len]
    lowerCAmelCase = range(_snake_case , len(_snake_case ) , max_length - doc_stride )
    lowerCAmelCase = []
    lowerCAmelCase = []
    lowerCAmelCase = []
    lowerCAmelCase = []  # null, yes, no, long, short
    for i in doc_start_indices:
        lowerCAmelCase = i + max_length - q_len
        lowerCAmelCase = input_ids[i:end_index]
        inputs.append(q_indices + slice )
        assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            # Span fully inside this window: re-base indices onto the window.
            lowerCAmelCase = start_token - i + q_len
            lowerCAmelCase = end_token - i + q_len
            answers_category.append(answer['''category'''][0] )  # ["short"] -> "short"
        else:
            lowerCAmelCase = -100
            lowerCAmelCase = -100
            answers_category.append('''null''' )
        lowerCAmelCase = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(_snake_case )
        answers_end_token.append(_snake_case )
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print('''ISSUE in strided for ID:''' , example['''id'''] )
                print('''New:''' , tokenizer.decode(_snake_case ) )
                print('''Old:''' , tokenizer.decode(_snake_case ) , end='''\n\n''' )
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=2048 , _snake_case=4096 , _snake_case=False ):
    # `datasets.map` adapter around get_strided_contexts_and_ans.
    lowerCAmelCase = get_strided_contexts_and_ans(
        _snake_case ,
        _snake_case ,
        doc_stride=_snake_case ,
        max_length=_snake_case ,
        assertion=_snake_case , )
    return example


def UpperCAmelCase ( _snake_case , _snake_case ):
    # Stream processed windows into a .jsonl file, dropping unanswerable
    # windows and randomly thinning "null" windows.
    # NOTE(review): called below as `save_to_disk(data, file_name=...)` — the
    # mangled parameter names cannot accept that keyword; verify upstream.
    with jsonlines.open(_snake_case , '''a''' ) as writer:
        for example in tqdm(_snake_case , total=len(_snake_case ) , desc='''Saving samples ... ''' ):
            lowerCAmelCase = example['''labels''']
            for ids, start, end, cat in zip(
                example['''input_ids'''] ,
                labels['''start_token'''] ,
                labels['''end_token'''] ,
                labels['''category'''] , ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        '''input_ids''': ids,
                        '''start_token''': start,
                        '''end_token''': end,
                        '''category''': CATEGORY_MAPPING[cat],
                    } )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    UpperCAmelCase_ =load_dataset("""natural_questions""")
    UpperCAmelCase_ =BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
    UpperCAmelCase_ =data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
    UpperCAmelCase_ ={
        """tokenizer""": tokenizer,
        """doc_stride""": DOC_STRIDE,
        """max_length""": MAX_LENGTH,
        """assertion""": False,
    }
    UpperCAmelCase_ =data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    UpperCAmelCase_ =data.remove_columns(["""annotations""", """document""", """id""", """question"""])
    print(data)

    np.random.seed(SEED)
    UpperCAmelCase_ ="""nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
    save_to_disk(data, file_name=cache_file_name)
33
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class __UpperCamelCase ( AbstractDatasetReader ):
    """Dataset reader that builds a `datasets` Dataset from a Spark DataFrame.

    Fixes vs. the previous revision: `__init__` declared nine parameters all
    named `UpperCAmelCase_` (a SyntaxError), the base class referenced an
    undefined `__UpperCAmelCase` instead of the imported
    `AbstractDatasetReader`, and the values read back in `read()`
    (`self._load_from_cache_file`, `self._file_format`, `self.builder`) were
    assigned to throwaway locals, so they never existed on the instance.
    """

    def __init__(
        self ,
        df ,                            # pyspark.sql.DataFrame to read from
        split = None ,                  # NamedSplit to expose
        features = None ,               # optional explicit Features schema
        streaming = True ,              # stream instead of materializing
        cache_dir = None ,
        keep_in_memory = False ,
        working_dir = None ,            # scratch dir for the Spark builder
        load_from_cache_file = True ,   # False forces a re-download/rebuild
        file_format = "arrow" ,         # on-disk format when materializing
        **kwargs ,
    ):
        super().__init__(
            split=split ,
            features=features ,
            cache_dir=cache_dir ,
            keep_in_memory=keep_in_memory ,
            streaming=streaming ,
            **kwargs ,
        )
        # These three are read in `read()` below, so they must live on `self`.
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df ,
            features=features ,
            cache_dir=cache_dir ,
            working_dir=working_dir ,
            **kwargs ,
        )

    def __snake_case ( self ):
        """Build the dataset: streaming view, or prepared on-disk split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # Honor the cache unless the caller asked for a forced rebuild.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode ,
            file_format=self._file_format ,
        )
        return self.builder.as_dataset(split=self.split )
33
1
# NOTE(review): hand-rolled CNN (one conv layer + pooling + 2-layer MLP)
# trained with plain gradient descent; originally from a tutorial codebase.
# Obfuscation damage kept byte-identical below: `__init__` declares duplicate
# `UpperCAmelCase_` parameters (SyntaxError); EVERY method is named
# `__snake_case` (each def overwrites the previous, leaving only the last);
# assignment targets were collapsed to `lowerCAmelCase`, so the `self.<attr>`
# state the methods read (`self.conva`, `self.wkj`, `self.vji`, …) is never
# written; calls like `self.sig`, `self.pooling`, `self._expand`,
# `self.Expand_Mat` target methods whose names were destroyed.  TODO: restore
# names from the upstream file before use.  Comments only added here.
import pickle

import numpy as np
from matplotlib import pyplot as plt


class __UpperCamelCase :
    '''simple docstring'''

    def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=0.2 , UpperCAmelCase_=0.2 ):
        # Presumably (conv1_get, size_p1, bp_num1..3, rate_w, rate_t) upstream;
        # weights/thresholds are drawn uniformly from (-0.5, 0.5) / (-1, 1).
        lowerCAmelCase = bp_numa
        lowerCAmelCase = bp_numa
        lowerCAmelCase = bp_numa
        lowerCAmelCase = conva_get[:2]
        lowerCAmelCase = conva_get[2]
        lowerCAmelCase = size_pa
        lowerCAmelCase = rate_w
        lowerCAmelCase = rate_t
        lowerCAmelCase = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowerCAmelCase = -2 * np.random.rand(self.conva[1] ) + 1
        lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
        lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1

    def __snake_case ( self , UpperCAmelCase_ ):
        # save model dict with pickle
        lowerCAmelCase = {
            '''num_bp1''': self.num_bpa,
            '''num_bp2''': self.num_bpa,
            '''num_bp3''': self.num_bpa,
            '''conv1''': self.conva,
            '''step_conv1''': self.step_conva,
            '''size_pooling1''': self.size_poolinga,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conva,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conva,
            '''thre_bp2''': self.thre_bpa,
            '''thre_bp3''': self.thre_bpa,
        }
        with open(UpperCAmelCase_ , '''wb''' ) as f:
            pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
        print(F"""Model saved: {save_path}""" )

    @classmethod
    def __snake_case ( cls , UpperCAmelCase_ ):
        # read saved model
        with open(UpperCAmelCase_ , '''rb''' ) as f:
            lowerCAmelCase = pickle.load(UpperCAmelCase_ )  # noqa: S301
        lowerCAmelCase = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        lowerCAmelCase = model_dic.get('''size_pooling1''' )
        lowerCAmelCase = model_dic.get('''num_bp1''' )
        lowerCAmelCase = model_dic.get('''num_bp2''' )
        lowerCAmelCase = model_dic.get('''num_bp3''' )
        lowerCAmelCase = model_dic.get('''rate_weight''' )
        lowerCAmelCase = model_dic.get('''rate_thre''' )
        # create model instance
        # NOTE(review): `CNN` is not the (mangled) class name in this file.
        lowerCAmelCase = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        # modify model parameter
        lowerCAmelCase = model_dic.get('''w_conv1''' )
        lowerCAmelCase = model_dic.get('''wkj''' )
        lowerCAmelCase = model_dic.get('''vji''' )
        lowerCAmelCase = model_dic.get('''thre_conv1''' )
        lowerCAmelCase = model_dic.get('''thre_bp2''' )
        lowerCAmelCase = model_dic.get('''thre_bp3''' )
        return conv_ins

    def __snake_case ( self , UpperCAmelCase_ ):
        # Logistic sigmoid activation.
        return 1 / (1 + np.exp(-1 * x ))

    def __snake_case ( self , UpperCAmelCase_ ):
        # Round prediction values for the final class decision.
        return round(UpperCAmelCase_ , 3 )

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
        # convolution process
        lowerCAmelCase = convs[0]
        lowerCAmelCase = convs[1]
        lowerCAmelCase = np.shape(UpperCAmelCase_ )[0]
        # get the data slice of original image data, data_focus
        lowerCAmelCase = []
        for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_ ):
            for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_ ):
                lowerCAmelCase = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(UpperCAmelCase_ )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowerCAmelCase = []
        lowerCAmelCase = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(UpperCAmelCase_ ):
            lowerCAmelCase = []
            for i_focus in range(len(UpperCAmelCase_ ) ):
                lowerCAmelCase = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(UpperCAmelCase_ ) )
            lowerCAmelCase = np.asmatrix(UpperCAmelCase_ ).reshape(
                UpperCAmelCase_ , UpperCAmelCase_ )
            data_featuremap.append(UpperCAmelCase_ )
        # expanding the data slice to One dimenssion
        lowerCAmelCase = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(UpperCAmelCase_ ) )
        lowerCAmelCase = np.asarray(UpperCAmelCase_ )
        return focus_list, data_featuremap

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="average_pool" ):
        # pooling process
        lowerCAmelCase = len(featuremaps[0] )
        lowerCAmelCase = int(size_map / size_pooling )
        lowerCAmelCase = []
        for i_map in range(len(UpperCAmelCase_ ) ):
            lowerCAmelCase = featuremaps[i_map]
            lowerCAmelCase = []
            for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
                for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
                    lowerCAmelCase = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(UpperCAmelCase_ ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(UpperCAmelCase_ ) )
            lowerCAmelCase = np.asmatrix(UpperCAmelCase_ ).reshape(UpperCAmelCase_ , UpperCAmelCase_ )
            featuremap_pooled.append(UpperCAmelCase_ )
        return featuremap_pooled

    def __snake_case ( self , UpperCAmelCase_ ):
        # expanding three dimension data to one dimension list
        lowerCAmelCase = []
        for i in range(len(UpperCAmelCase_ ) ):
            lowerCAmelCase = np.shape(data[i] )
            lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] )
            lowerCAmelCase = data_listed.getA().tolist()[0]
            data_expanded.extend(UpperCAmelCase_ )
        lowerCAmelCase = np.asarray(UpperCAmelCase_ )
        return data_expanded

    def __snake_case ( self , UpperCAmelCase_ ):
        # expanding matrix to one dimension list
        lowerCAmelCase = np.asarray(UpperCAmelCase_ )
        lowerCAmelCase = np.shape(UpperCAmelCase_ )
        lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
        # Scatter pooled-layer gradients back onto the conv feature maps and
        # apply the sigmoid derivative out*(1-out).
        lowerCAmelCase = []
        lowerCAmelCase = 0
        for i_map in range(UpperCAmelCase_ ):
            lowerCAmelCase = np.ones((size_map, size_map) )
            for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
                for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
                    lowerCAmelCase = pd_pool[
                        i_pool
                    ]
                    lowerCAmelCase = i_pool + 1
            lowerCAmelCase = np.multiply(
                UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(UpperCAmelCase_ )
        return pd_all

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=bool ):
        # model traning
        # NOTE(review): the `=bool` default looks like a mangled `draw_e=bool`
        # flag (read below as `if draw_e:`) — verify upstream.
        print('''----------------------Start Training-------------------------''' )
        print((''' - - Shape: Train_Data ''', np.shape(UpperCAmelCase_ )) )
        print((''' - - Shape: Teach_Data ''', np.shape(UpperCAmelCase_ )) )
        lowerCAmelCase = 0
        lowerCAmelCase = []
        lowerCAmelCase = 1_00_00
        while rp < n_repeat and mse >= error_accuracy:
            lowerCAmelCase = 0
            print(F"""-------------Learning Time {rp}--------------""" )
            for p in range(len(UpperCAmelCase_ ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowerCAmelCase = np.asmatrix(datas_train[p] )
                lowerCAmelCase = np.asarray(datas_teach[p] )
                lowerCAmelCase , lowerCAmelCase = self.convolute(
                    UpperCAmelCase_ ,
                    self.conva ,
                    self.w_conva ,
                    self.thre_conva ,
                    conv_step=self.step_conva ,
                )
                lowerCAmelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga )
                lowerCAmelCase = np.shape(UpperCAmelCase_ )
                lowerCAmelCase = self._expand(UpperCAmelCase_ )
                lowerCAmelCase = data_bp_input
                lowerCAmelCase = np.dot(UpperCAmelCase_ , self.vji.T ) - self.thre_bpa
                lowerCAmelCase = self.sig(UpperCAmelCase_ )
                lowerCAmelCase = np.dot(UpperCAmelCase_ , self.wkj.T ) - self.thre_bpa
                lowerCAmelCase = self.sig(UpperCAmelCase_ )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowerCAmelCase = np.multiply(
                    (data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa) ) )
                lowerCAmelCase = np.multiply(
                    np.dot(UpperCAmelCase_ , self.wkj ) , np.multiply(UpperCAmelCase_ , (1 - bp_outa) ) )
                lowerCAmelCase = np.dot(UpperCAmelCase_ , self.vji )
                lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowerCAmelCase = pd_conva_pooled.T.getA().tolist()
                lowerCAmelCase = self._calculate_gradient_from_pool(
                    UpperCAmelCase_ ,
                    UpperCAmelCase_ ,
                    shape_featuremapa[0] ,
                    shape_featuremapa[1] ,
                    self.size_poolinga ,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv] )
                    lowerCAmelCase = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_ )
                    lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowerCAmelCase = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
                lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowerCAmelCase = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            lowerCAmelCase = rp + 1
            lowerCAmelCase = error_count / patterns
            all_mse.append(UpperCAmelCase_ )

        def draw_error():
            # Plot per-epoch MSE against the target accuracy line.
            lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(UpperCAmelCase_ , '''+-''' )
            plt.plot(UpperCAmelCase_ , '''r--''' )
            plt.xlabel('''Learning Times''' )
            plt.ylabel('''All_mse''' )
            plt.grid(UpperCAmelCase_ , alpha=0.5 )
            plt.show()

        print('''------------------Training Complished---------------------''' )
        print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse

    def __snake_case ( self , UpperCAmelCase_ ):
        # model predict
        lowerCAmelCase = []
        print('''-------------------Start Testing-------------------------''' )
        print((''' - - Shape: Test_Data ''', np.shape(UpperCAmelCase_ )) )
        for p in range(len(UpperCAmelCase_ ) ):
            lowerCAmelCase = np.asmatrix(datas_test[p] )
            lowerCAmelCase , lowerCAmelCase = self.convolute(
                UpperCAmelCase_ ,
                self.conva ,
                self.w_conva ,
                self.thre_conva ,
                conv_step=self.step_conva ,
            )
            lowerCAmelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga )
            lowerCAmelCase = self._expand(UpperCAmelCase_ )
            lowerCAmelCase = data_bp_input
            lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa
            lowerCAmelCase = self.sig(UpperCAmelCase_ )
            lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
            lowerCAmelCase = self.sig(UpperCAmelCase_ )
            produce_out.extend(bp_outa.getA().tolist() )
        lowerCAmelCase = [list(map(self.do_round , UpperCAmelCase_ ) ) for each in produce_out]
        return np.asarray(UpperCAmelCase_ )

    def __snake_case ( self , UpperCAmelCase_ ):
        # return the data of image after convoluting process so we can check it out
        lowerCAmelCase = np.asmatrix(UpperCAmelCase_ )
        lowerCAmelCase , lowerCAmelCase = self.convolute(
            UpperCAmelCase_ ,
            self.conva ,
            self.w_conva ,
            self.thre_conva ,
            conv_step=self.step_conva ,
        )
        lowerCAmelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga )
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
33
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits = 3 ):
    """Build an n-qubit quantum Fourier transform circuit, simulate it for
    10000 shots on the qasm simulator, and return the measurement counts.

    Fixes vs. the previous revision: the function was renamed to an
    obfuscated placeholder while the ``__main__`` guard still called
    ``quantum_fourier_transform`` (NameError); the type guard was
    ``isinstance(n, n)``, which itself raises TypeError for any input; and
    the controlled-phase loop bound plus the ``cp``/``swap``/``measure``/
    ``execute`` arguments had been collapsed to placeholders.

    :param number_of_qubits: circuit width; must be an exact integer in 1..10.
    :raises TypeError: if a string is passed.
    :raises ValueError: if non-positive, non-integral, or > 10.
    """
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be a integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )

    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )

    # QFT: Hadamard each qubit from the top down, then controlled phase
    # rotations pi/2^(counter-j) between the remaining lower qubits.
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )

    # Reverse the qubit order to finish the transform.
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10000 )

    return job.result().get_counts(quantum_circuit )


if __name__ == "__main__":
    print(
        F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
33
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True )
class __UpperCamelCase ( TaskTemplate ):
    """Image-classification task template: maps dataset columns onto the
    canonical ``image``/``labels`` schema.

    Fixes vs. the previous revision: all five class attributes were named
    ``__a`` (each overwrote the previous, so ``self.label_column`` etc. did
    not exist), both methods shared the name ``__snake_case`` (the property
    overwrote the method), ``frozen=`` and the base class referenced an
    undefined ``__UpperCAmelCase``, and the method body read unbound names
    ``label_schema``/``task_template``.
    """

    # `task` survives `asdict()` even at its default value.
    task: str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""image""": Image()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self , features ):
        """Return a copy of this template whose label schema uses the
        dataset's actual ClassLabel feature for ``self.label_column``.

        :raises ValueError: if the column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template

    @property
    def column_mapping(self ) -> Dict[str, str]:
        """Dataset column name -> canonical task column name."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class __UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving SDE scheduler (score-based generative modeling).

    Fixes vs. the previous revision: ``__init__`` declared four parameters
    all named ``UpperCAmelCase_`` (a SyntaxError); both methods were named
    ``__snake_case`` (the second overwrote the first); and ``set_timesteps``
    assigned its schedule to a throwaway local instead of ``self.timesteps``,
    so the guard in the step method raised unconditionally.  The base classes
    referenced an undefined ``__UpperCAmelCase`` instead of the imported
    mixins, and the ``order`` class attribute was mangled to ``__a``.
    """

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=20_00 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ):
        # All schedule state is created lazily by `set_timesteps`.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device = None ):
        """Create the (descending, 1 -> sampling_eps) timestep schedule."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """One reverse-SDE predictor step; returns (noised sample, mean)."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        # Broadcast std up to the score's rank before dividing.
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps )

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
33
1
"""Demonstrate classic fuzzy-set operations on two triangular membership
functions and plot every result.

Fixes vs. the previous revision: every assignment target had been collapsed
to the single name ``UpperCAmelCase_`` (each statement overwriting the last),
while the plotting section still read ``X``, ``young``, ``middle_aged``,
``union`` … — all NameErrors.  The variable names restored here are exactly
the ones the plotting calls reference, and both fuzzy sets now use their own
triangle parameters instead of one overwritten list.
"""
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("""Young""")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("""Middle aged""")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("""union""")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("""intersection""")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("""complement_a""")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("""difference a/b""")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("""alg_sum""")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("""alg_product""")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("""bdd_sum""")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("""bdd_difference""")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
33
"""Read and write the YAML metadata block at the top of a dataset README.md."""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A ``yaml.SafeLoader`` that rejects mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        # Resolve every key node of the mapping to its constructed Python value.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert them to tuples so Counter can count them.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split README text into ``(yaml_block, rest)``.

    The YAML block is the text between a leading ``---`` line and the next
    ``---`` line. Returns ``(None, full_text)`` when no such block exists.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dataset card metadata, round-trippable to/from the README YAML block."""

    # Fields spelled with dashes in YAML but underscores as Python attributes.
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block of the README at ``path``.

        Returns an empty ``DatasetMetadata`` when the README has no YAML block.
        """
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata into the README at ``path``, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        # Replace any existing YAML block while keeping the non-YAML body.
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (duplicate keys rejected) into metadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, restoring dashed key spellings; insertion order kept."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task categories; values are left empty here. NOTE(review): variable name
# reconstructed — it is never referenced in the visible code, confirm against callers.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    # Round-trip the metadata back into the README (normalizes the YAML block).
    dataset_metadata.to_readme(readme_filepath)
33
1
# Lazy import structure for the UniSpeech model: the heavy torch-backed
# modeling module is only imported on first attribute access (or eagerly
# under TYPE_CHECKING so static type checkers see the real symbols).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Configuration symbols are importable regardless of installed backends.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

# Modeling symbols are registered only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Tests for the video-classification pipeline."""

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a top-2 video classifier plus example inputs (local file and URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        # Every example must yield exactly top_k=2 (score, label) dicts.
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")

        # Single video input.
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        # Batched input: one result list per video.
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF port of this tiny model; kept as a placeholder.
        pass
33
1
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    """Tests for AutoTokenizer dispatch, registration and remote-code loading."""

    def setUp(self):
        # Don't wait on the interactive trust_remote_code prompt in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conservative_use_remote_code(self):
        # Local stand-ins for the remote tokenizer classes; the attribute flags
        # whether the *remote* implementation was loaded.
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
33
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def __snake_case ( self , UpperCAmelCase_=0 ): lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) ) lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ 
) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) # warmup pass to apply optimizations lowerCAmelCase = pipe(**self.get_dummy_inputs() ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ 
) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def __snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __snake_case ( self ): lowerCAmelCase = ort.SessionOptions() lowerCAmelCase = False return options def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A 
fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
1
import numpy as np


def UpperCAmelCase(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, applied element-wise.

    Returns ``vector`` where an entry is strictly positive, and
    ``alpha * (exp(entry) - 1)`` otherwise (so 0 maps to 0).

    Args:
        vector: input array of any shape.
        alpha: scale for the negative branch.

    Returns:
        Array of the same shape as ``vector``.
    """
    # NOTE(review): the original signature declared the same placeholder name
    # for both parameters (a SyntaxError in Python); the body references
    # `vector` and `alpha`, so those names are restored here.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Turn a fine-pruned checkpoint into a standard (dense) checkpoint.

    Loads ``pytorch_model.bin`` from ``args.model_name_or_path``, applies the
    binarized masks produced during movement/magnitude/L0 pruning, and saves
    the resulting dense state dict to ``args.target_model_path``.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings, layer norms and pooler are never pruned: copy as-is.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            # Task heads are kept dense as well.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # The score tensors themselves are consumed below, not saved.
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                left, right = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (right - left) + left
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()

    main(args)
33
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure consumed by `_LazyModule`: submodule name -> public names.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

# The modeling objects require torch; only register them when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so attributes are
    # imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } UpperCAmelCase_ ={ """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } UpperCAmelCase_ ={ """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def UpperCAmelCase ( _snake_case ): lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char lowerCAmelCase = set(_snake_case ) return pairs class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ): super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = merges_file lowerCAmelCase = {} lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 self.add_from_file(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in 
self.encoder.items()} with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self ): return len(self.encoder ) def __snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.cache: return self.cache[token] lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 
while i < len(UpperCAmelCase_ ): try: lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = new_word if len(UpperCAmelCase_ ) == 1: break else: lowerCAmelCase = get_pairs(UpperCAmelCase_ ) lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ ) lowerCAmelCase = word[:-4] lowerCAmelCase = word return word def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [] lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) ) return split_tokens def __snake_case ( self , UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.merges_file , UpperCAmelCase_ ) return 
out_vocab_file, out_merge_file def __snake_case ( self , UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): try: with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" ) return lowerCAmelCase = f.readlines() for lineTmp in lines: lowerCAmelCase = lineTmp.strip() lowerCAmelCase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase = line[:idx] lowerCAmelCase = len(self.encoder )
33
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy import structure consumed by `_LazyModule`: submodule name -> public names.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

# PyTorch implementations are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

# Likewise for the TensorFlow implementations.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so attributes are
    # imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """A node of a disjoint-set forest: holds the data, a parent link and a rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # every node starts as the root of its own tree
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Union-find structure with path compression and union by rank."""

    def __init__(self) -> None:
        # map from node data to its tree node
        self.map = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode:
        """Return the root of the set ``data`` belongs to (with path compression)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode, node2: DisjointSetTreeNode) -> None:
        """Attach the lower-ranked root under the higher-ranked one."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the two disjoint sets containing ``data1`` and ``data2``."""
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a nested adjacency dict."""

    def __init__(self) -> None:
        # connections: node -> {neighbour: weight}
        self.connections = {}

    def add_node(self, node: T) -> None:
        """Add ``node`` to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge between ``node1`` and ``node2``."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted":
        """Return a minimum spanning tree of the graph as a new graph.

        Assumes the graph is connected (loops until |V| - 1 edges are chosen).
        """
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # Fix: the original sort key referenced an undefined name `x`.
        edges.sort(key=lambda edge: edge[2])

        # Creating the disjoint set.
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the cheapest edge joining two components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
33
1
UpperCAmelCase_ = """Alexander Joslin"""

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully-parenthesised infix expression with Dijkstra's
    two-stack algorithm.

    Operands must be single digits (each character is parsed individually).

    Args:
        equation: expression such as ``"(5 + ((4 * 2) * (2 + 3)))"``.

    Returns:
        The value of the expression.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go on the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go on the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing bracket applies the most recent operator to
            # the two most recent operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 was pushed first, so it is the left-hand operand.
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the single value left on the operand stack is the result.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
33
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Count the ordered combinations of elements of ``array`` summing to
    ``target`` (elements may be reused), via naive recursion.

    ``n`` is the length of ``array``; it is unused here but kept so all three
    implementations share one signature.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Same count as :func:`combination_sum_iv`, memoised top-down."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            # Already computed for this sub-target.
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Same count as :func:`combination_sum_iv`, iterative bottom-up DP."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: choose nothing

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    array = [1, 2, 5]
    target = 5
    print(combination_sum_iv(n, array, target))
33
1
from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if ``num/den`` is a (non-trivial) digit-cancelling fraction:
    the last digit of ``num`` equals the first digit of ``den`` and removing
    those digits leaves an equal fraction (e.g. 49/98 -> 4/8)."""
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


def fraction_list(digit_len: int) -> list:
    """Return all curious digit-cancelling two-digit fractions as strings."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # Skip trivial cases where both digits to cancel are zero.
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    """Project Euler 33: denominator of the product of the four curious
    fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        # Multiply by the inverse so `result` ends up as the denominator.
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
33
import torch

from diffusers import StableDiffusionPipeline


# Path to the DreamBooth fine-tuned weights (output dir of train_dreambooth.py).
model_id = "path-to-your-trained-model"
# fp16 halves memory use; requires a CUDA device.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# "sks" is the rare-token identifier bound to the subject during fine-tuning.
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
33
1
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial
legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers
look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """Wrapper around the official CUAD scoring script."""

    def _info(self):
        # Declares the metric's input schema; called once by `datasets.Metric`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # Map prediction id -> candidate answer texts, the format `evaluate` expects.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-wrap the flat references into the nested CUAD dataset layout.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
33
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed lazily by _LazyModule so
# that importing this package does not eagerly import torch-heavy modules.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only modeling exports under their own key instead of
    # rebinding (and thereby clobbering) the whole import-structure dict.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
1
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples drawn from ``seq``."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Normalize plaintext for Playfair encryption.

    Keeps only ASCII letters (uppercased), inserts an ``X`` between doubled
    letters, and pads with a trailing ``X`` so the length is even.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        # Still pad a lone character so callers can always split into digraphs.
        return dirty + "X" * (len(dirty) & 1)

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key table (as a flat list of 25 letters)."""
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair ``ciphertext`` under ``key`` (inverse of :func:`encode`)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
33
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


# NOTE(review): this test class appears machine-obfuscated — every method is
# named `__snake_case` (later defs shadow earlier ones, so only the last is
# registered) and every assignment target was rewritten to `lowerCAmelCase`
# while reads still use the original names (self.tmpdirname, retriever, ...),
# which would raise NameError at runtime. Code left byte-identical; restore
# the original identifiers before relying on these tests.
@require_faiss
class __UpperCamelCase ( __UpperCAmelCase ):
    """Unit tests for RagRetriever over canonical/custom/legacy faiss indexes."""

    def __snake_case ( self ):
        # setUp: materialize tiny DPR (wordpiece) and BART (BPE) tokenizers on disk.
        lowerCAmelCase = tempfile.mkdtemp()
        lowerCAmelCase = 8  # retrieval embedding dimensionality used throughout
        # DPR tok
        lowerCAmelCase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
        lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        lowerCAmelCase = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
        lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCAmelCase = {'''unk_token''': '''<unk>'''}
        lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
        lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase_ ) )

    def __snake_case ( self ):
        # Helper: question-encoder tokenizer from the on-disk DPR fixture.
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def __snake_case ( self ):
        # Helper: context-encoder tokenizer from the same DPR fixture.
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def __snake_case ( self ):
        # Helper: generator tokenizer from the on-disk BART fixture.
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    def __snake_case ( self ):
        # tearDown: remove all tokenizer/index fixtures.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self ):
        # Two-document dataset with a flat inner-product faiss index.
        lowerCAmelCase = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def __snake_case ( self ):
        # Retriever backed by the canonical HF index; dataset loading is mocked.
        lowerCAmelCase = self.get_dummy_dataset()
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            lowerCAmelCase = dataset
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def __snake_case ( self , UpperCAmelCase_ ):
        # Retriever backed by a CustomHFIndex, either loaded from disk or in-memory.
        lowerCAmelCase = self.get_dummy_dataset()
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' )
            lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            lowerCAmelCase = RagRetriever(
                UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , )
        return retriever

    def __snake_case ( self ):
        # Retriever backed by the legacy (pickled tsv + .dpr index files) format.
        lowerCAmelCase = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) )
        lowerCAmelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        lowerCAmelCase = RagRetriever(
            UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    def __snake_case ( self ):
        # retrieve() over the canonical HF index: shapes, doc dicts and ranking.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        # NOTE(review): `np.floataa` is not a real NumPy dtype — presumably
        # np.float32 before obfuscation; confirm against upstream.
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # save_pretrained / from_pretrained round-trip for the canonical index.
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                lowerCAmelCase = self.get_dummy_dataset()
                retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # retrieve() over a CustomHFIndex built in memory.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # save/load round-trip for the in-memory CustomHFIndex retriever.
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # retrieve() over a CustomHFIndex loaded from disk.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # save/load round-trip for the on-disk CustomHFIndex retriever.
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    def __snake_case ( self ):
        # retrieve() over the legacy index: doc dicts only carry text/title.
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCAmelCase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def __snake_case ( self ):
        # save/load round-trip for the legacy index retriever.
        lowerCAmelCase = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCAmelCase_ )
            lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
            self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCAmelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case ( self ):
        # __call__: numpy outputs by default, torch tensors with return_tensors='pt'.
        import torch

        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
        lowerCAmelCase = retriever(
            UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , )
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def __snake_case ( self ):
        # With a ctx-encoder tokenizer set, output also carries tokenized docs.
        lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
        lowerCAmelCase = 1
        lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
        retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ )
        lowerCAmelCase = [[5, 7], [10, 11]]
        lowerCAmelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
        self.assertEqual(
            len(UpperCAmelCase_ ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ )  # check for doc token related keys in dictionary.
33
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class __UpperCamelCase(PretrainedConfig):
    """Composite configuration gluing together an encoder config and a decoder config.

    Must be initialized with ``encoder`` and ``decoder`` keyword arguments,
    each a plain dict produced by ``PretrainedConfig.to_dict`` (including a
    ``model_type`` key used to pick the right config class via ``AutoConfig``).
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Raise instead of assert: asserts are stripped under ``python -O``.
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError("Config has to be initialized with encoder and decoder config")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Local import avoids a circular dependency with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two ``PretrainedConfig`` instances.

        Forces the decoder config into decoder mode with cross attention.
        """
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
33
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Reference checkpoints -> hosted config files.
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for a Switch Transformers (mixture-of-experts T5-style) model.

    Stores sizes (vocab/d_model/d_kv/d_ff), layer counts including how many
    layers are sparse (MoE), and the router hyper-parameters.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # ``feed_forward_proj`` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\''''
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
33
1
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class __UpperCamelCase(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into one processor.

    Handles padding of per-image text-query lists to a common length and
    concatenation of the resulting encodings across the batch.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the legacy kwarg as a fallback for the image processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        Returns a ``BatchEncoding`` carrying ``input_ids``/``attention_mask``
        and/or ``pixel_values``/``query_pixel_values`` depending on the inputs.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                # Single string or a flat list of strings -> one encoding.
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings along the batch axis using
            # the framework matching ``return_tensors``.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        # Forward to the image processor's (deprecated) post_process.
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        # Forward to the image processor's object-detection post-processing.
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        # Forward to the image processor's image-guided detection post-processing.
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
33
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. 
Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
1
from ..utils import DummyObject, requires_backends


# NOTE(review): the original file repeated thirteen byte-identical classes,
# all bound to the same name, so only the last binding survived at module
# level; they are collapsed into a single definition here.  The metaclass
# was written as the undefined name `__UpperCAmelCase` (a rename artifact);
# the imported `DummyObject` is the metaclass these dummy placeholders use.
# The original parameter lists also reused one name for both *args and
# **kwargs, which is a SyntaxError; distinct names are used below.
class __UpperCamelCase(metaclass=DummyObject):
    """Placeholder that raises an informative error (via ``requires_backends``)
    whenever it is instantiated or its factory classmethods are called
    without the ``flax`` backend installed."""

    # Backend(s) required by the real implementation this stands in for.
    __a = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def __snake_case(cls, *args, **kwargs):
        # The original defined this classmethod twice with identical
        # bodies; the redundant duplicate has been dropped.
        requires_backends(cls, ["flax"])
33
# Test suite for the DeepFloyd IF inpainting super-resolution pipeline.
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


# NOTE(review): identifiers such as __UpperCamelCase/__UpperCAmelCase/__snake_case
# look like artifacts of automated renaming; __UpperCAmelCase and the
# annotation names (Tuple, Dict, ...) are not defined in this file — confirm
# against the original source (likely PipelineTesterMixin and
# IFPipelineTesterMixin as bases, with distinct test-method names).
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Pipeline tests: wires IFInpaintingSuperResolutionPipeline into the
    shared tester mixins with its parameter sets and dummy inputs."""

    # Pipeline class under test and the parameter sets exercised by the mixins.
    __a : Tuple =IFInpaintingSuperResolutionPipeline
    __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
    __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}

    def __snake_case ( self ):
        # Build the lightweight dummy model components used by the tests.
        return self._get_superresolution_dummy_components()

    # NOTE(review): the two identical parameter names below are a rename
    # artifact (a duplicate argument name is a SyntaxError); the original
    # signature was presumably (self, device, seed=0) — confirm.
    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
        # Seed an RNG appropriate for the target device: "mps" uses the
        # global torch seed, other devices take a device-local Generator.
        if str(UpperCAmelCase_ ).startswith('''mps''' ):
            lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
        else:
            lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
        # Random low-res image plus full-res original image and mask.
        lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
        # Keyword arguments fed to the pipeline's __call__ by the mixin.
        lowerCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,
    )
    def __snake_case ( self ):
        # xFormers attention must match the default attention closely.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __snake_case ( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def __snake_case ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __snake_case ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __snake_case ( self ):
        self._test_save_load_local()

    def __snake_case ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,
        )
33
1
# Early-exiting ("highway") RoBERTa models built on the DeeBERT machinery.
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


# NOTE(review): class names were collapsed to __UpperCamelCase and the base
# class to the undefined __UpperCAmelCase by automated renaming; the code
# below references DeeRobertaModel, which this first class presumably was
# (a DeeBertModel with RoBERTa embeddings) — confirm against the original.
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ ,
    __UpperCAmelCase ,
)
class __UpperCamelCase ( __UpperCAmelCase ):
    """DeeBERT-style RoBERTa encoder: swaps in RobertaEmbeddings."""

    __a : List[str] =RobertaConfig
    __a : Optional[Any] ="""roberta"""

    def __init__( self , UpperCAmelCase_ ):
        super().__init__(UpperCAmelCase_ )
        # Replace the BERT embeddings with RoBERTa embeddings.
        lowerCAmelCase = RobertaEmbeddings(UpperCAmelCase_ )
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. """ ,
    __UpperCAmelCase ,
)
class __UpperCamelCase ( __UpperCAmelCase ):
    """Sequence classifier with per-layer highway exits on top of
    DeeRoBERTa; regression when num_labels == 1, classification otherwise."""

    __a : List[str] =RobertaConfig
    __a : List[Any] ="""roberta"""

    def __init__( self , UpperCAmelCase_ ):
        super().__init__(UpperCAmelCase_ )
        # NOTE(review): these rebindings of a single local are rename
        # artifacts; originally attribute assignments (self.num_labels,
        # self.num_layers, self.roberta, self.dropout, self.classifier).
        lowerCAmelCase = config.num_labels
        lowerCAmelCase = config.num_hidden_layers
        lowerCAmelCase = DeeRobertaModel(UpperCAmelCase_ )
        lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
        lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(UpperCAmelCase_ )
    def __snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=-1 , UpperCAmelCase_=False , ):
        # Forward pass.  The encoder may raise HighwayException to exit at
        # an intermediate layer; both paths produce `outputs` whose first
        # element is logits.  Returns
        # (loss), logits, (hidden_states), (attentions), entropy.
        lowerCAmelCase = self.num_layers
        try:
            lowerCAmelCase = self.roberta(
                UpperCAmelCase_ ,
                attention_mask=UpperCAmelCase_ ,
                token_type_ids=UpperCAmelCase_ ,
                position_ids=UpperCAmelCase_ ,
                head_mask=UpperCAmelCase_ ,
                inputs_embeds=UpperCAmelCase_ ,
            )
            # Pooled output -> dropout -> classification head.
            lowerCAmelCase = outputs[1]
            lowerCAmelCase = self.dropout(UpperCAmelCase_ )
            lowerCAmelCase = self.classifier(UpperCAmelCase_ )
            lowerCAmelCase = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # Early exit: the exception carries the partial outputs and
            # the index of the layer at which the model exited.
            lowerCAmelCase = e.message
            lowerCAmelCase = e.exit_layer
            lowerCAmelCase = outputs[0]
        if not self.training:
            # Track prediction entropy at inference time.
            lowerCAmelCase = entropy(UpperCAmelCase_ )
            lowerCAmelCase = []
            lowerCAmelCase = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                lowerCAmelCase = MSELoss()
                lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                lowerCAmelCase = CrossEntropyLoss()
                lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            lowerCAmelCase = []
            for highway_exit in outputs[-1]:
                lowerCAmelCase = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(UpperCAmelCase_ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    lowerCAmelCase = MSELoss()
                    lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    lowerCAmelCase = CrossEntropyLoss()
                    lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(UpperCAmelCase_ )
            if train_highway:
                # Sum the highway losses, excluding the final exit.
                lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowerCAmelCase = (loss,) + outputs
        if not self.training:
            lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                # Replace final logits with the requested highway layer's.
                lowerCAmelCase = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
33
# Lazy import structure for the EfficientFormer model family: names are
# only materialized on first access, and optional backends (vision, torch,
# tf) contribute their symbols only when importable.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# NOTE(review): automated renaming collapsed every module-level binding to
# UpperCAmelCase_, but the final line reads _import_structure — originally
# these were successive updates to one _import_structure dict; confirm
# against the original source.
UpperCAmelCase_ ={
    """configuration_efficientformer""": [
        """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EfficientFormerConfig""",
    ]
}

# Image processor requires the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ =["""EfficientFormerImageProcessor"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ =[
        """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EfficientFormerForImageClassification""",
        """EfficientFormerForImageClassificationWithTeacher""",
        """EfficientFormerModel""",
        """EfficientFormerPreTrainedModel""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_ =[
        """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFEfficientFormerForImageClassification""",
        """TFEfficientFormerForImageClassificationWithTeacher""",
        """TFEfficientFormerModel""",
        """TFEfficientFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports, mirroring the lazy map.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
# `datasets` metric wrapper around the official MAUVE implementation.
import faiss  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import requests  # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


# NOTE(review): automated renaming collapsed these three module constants to
# UpperCAmelCase_, but the code below references _CITATION, _DESCRIPTION and
# _KWARGS_DESCRIPTION — those were their original names; confirm.
UpperCAmelCase_ ="""\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """

UpperCAmelCase_ ="""\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """

UpperCAmelCase_ =""" Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    """MAUVE metric: thin `datasets.Metric` adapter that declares the
    string-in/score-out schema and forwards everything to compute_mauve."""

    def __snake_case ( self ):
        # Metric metadata: description, citation, and the input features
        # (two parallel lists of strings).
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage='''https://github.com/krishnap25/mauve''' ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                }
            ) ,
            codebase_urls=['''https://github.com/krishnap25/mauve'''] ,
            reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] ,
        )

    # NOTE(review): all parameters were renamed to the same placeholder by
    # automated obfuscation (duplicate argument names are a SyntaxError);
    # the defaults correspond to the keyword arguments of compute_mauve
    # listed in the call below — confirm original names there.
    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="auto" , UpperCAmelCase_=-1 , UpperCAmelCase_=0.9 , UpperCAmelCase_=5 , UpperCAmelCase_=5_00 , UpperCAmelCase_="gpt2-large" , UpperCAmelCase_=-1 , UpperCAmelCase_=10_24 , UpperCAmelCase_=25 , UpperCAmelCase_=5 , UpperCAmelCase_=True , UpperCAmelCase_=25 , ):
        # Delegate the entire computation to the reference implementation.
        lowerCAmelCase = compute_mauve(
            p_text=UpperCAmelCase_ ,
            q_text=UpperCAmelCase_ ,
            p_features=UpperCAmelCase_ ,
            q_features=UpperCAmelCase_ ,
            p_tokens=UpperCAmelCase_ ,
            q_tokens=UpperCAmelCase_ ,
            num_buckets=UpperCAmelCase_ ,
            pca_max_data=UpperCAmelCase_ ,
            kmeans_explained_var=UpperCAmelCase_ ,
            kmeans_num_redo=UpperCAmelCase_ ,
            kmeans_max_iter=UpperCAmelCase_ ,
            featurize_model_name=UpperCAmelCase_ ,
            device_id=UpperCAmelCase_ ,
            max_text_length=UpperCAmelCase_ ,
            divergence_curve_discretization_size=UpperCAmelCase_ ,
            mauve_scaling_factor=UpperCAmelCase_ ,
            verbose=UpperCAmelCase_ ,
            seed=UpperCAmelCase_ ,
        )
        return out
33
# JSON / JSON Lines dataset builder for the `datasets` library.
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


# NOTE(review): rename artifact — the code below uses `logger`, so this
# binding was originally named `logger`; confirm.
UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)


# NOTE(review): field names were collapsed to __a by automated renaming;
# the option names used below (features, encoding, encoding_errors, field,
# use_threads, block_size, chunksize, newlines_in_values) identify what
# each slot originally was — confirm against the original source.
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    """BuilderConfig for JSON: parsing and decoding options."""

    __a : Optional[datasets.Features] =None
    __a : str ="utf-8"
    __a : Optional[str] =None
    __a : Optional[str] =None
    __a : bool =True  # deprecated
    __a : Optional[int] =None  # deprecated
    __a : int =1_0 << 2_0  # 10MB
    __a : Optional[bool] =None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that reads JSON Lines (or a single JSON object
    via config.field) into Arrow tables, chunk by chunk."""

    __a : str =JsonConfig

    def __snake_case ( self ):
        # Validate / migrate deprecated config options, then expose the
        # (optional) user-provided features.
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            lowerCAmelCase = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.'''
            )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    def __snake_case ( self , UpperCAmelCase_ ):
        # Resolve config.data_files into one SplitGenerator per split.
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
            # Single split: normalize to a list of iterable file sources.
            lowerCAmelCase = data_files
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
        return splits

    def __snake_case ( self , UpperCAmelCase_ ):
        # Align a freshly parsed table with the user-declared features.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
                lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
        return pa_table

    def __snake_case ( self , UpperCAmelCase_ ):
        # Yield (key, Arrow table) pairs from every input file.
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                # We keep only the field we are interested in
                lowerCAmelCase = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase_ , (list, tuple) ):
                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                else:
                    lowerCAmelCase = dataset
                lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                yield file_idx, self._cast_table(UpperCAmelCase_ )
            # If the file has one json object per line
            else:
                with open(UpperCAmelCase_ , '''rb''' ) as f:
                    lowerCAmelCase = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
                    lowerCAmelCase = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        lowerCAmelCase = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
                        try:
                            while True:
                                # Retry with a doubled Arrow block size if a
                                # JSON record straddles the block boundary.
                                try:
                                    lowerCAmelCase = paj.read_json(
                                        io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ )
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
                                        and "straddling" not in str(UpperCAmelCase_ )
                                        or block_size > len(UpperCAmelCase_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."""
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fall back: the file might be a single JSON list
                            # rather than JSON Lines.
                            try:
                                with open(
                                    UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors
                                ) as f:
                                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):  # list is the only sequence type supported in JSON
                                try:
                                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                                    lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(UpperCAmelCase_ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
                        batch_idx += 1
33
1
# Configuration for the MaskFormer-Swin backbone.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


# NOTE(review): rename artifact — presumably originally bound as `logger`.
UpperCAmelCase_ =logging.get_logger(__name__)


# NOTE(review): the base classes were obfuscated to the undefined name
# __UpperCAmelCase; given the imports above they are presumably
# BackboneConfigMixin and PretrainedConfig — confirm against the original.
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
    """Swin-style backbone configuration ("maskformer-swin" model type):
    patch embedding, per-stage depths/heads, windowed attention and the
    backbone stage bookkeeping."""

    __a : Optional[Any] ="""maskformer-swin"""
    # Maps standard config attribute names onto Swin's own names.
    __a : Optional[int] ={
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    # NOTE(review): every parameter was renamed to the same placeholder by
    # automated obfuscation (duplicate argument names are a SyntaxError),
    # and the `lowerCAmelCase = ...` lines below were originally
    # `self.<name> = <name>` attribute assignments — confirm.
    def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
        super().__init__(**UpperCAmelCase_ )
        lowerCAmelCase = image_size
        lowerCAmelCase = patch_size
        lowerCAmelCase = num_channels
        lowerCAmelCase = embed_dim
        lowerCAmelCase = depths
        lowerCAmelCase = len(UpperCAmelCase_ )
        lowerCAmelCase = num_heads
        lowerCAmelCase = window_size
        lowerCAmelCase = mlp_ratio
        lowerCAmelCase = qkv_bias
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = drop_path_rate
        lowerCAmelCase = hidden_act
        lowerCAmelCase = use_absolute_embeddings
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
        # Stage names ("stem", "stage1", ...) and the aligned
        # out_features/out_indices pair used by backbone consumers.
        lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
        lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
            out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
# MaskFormer-Swin backbone configuration (this file duplicates the
# preceding one byte-for-byte in the concatenated source).
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


# NOTE(review): obfuscated binding; the name `logger` was presumably used.
UpperCAmelCase_ =logging.get_logger(__name__)


# NOTE(review): base classes were obfuscated to the undefined identifier
# __UpperCAmelCase; the imports suggest BackboneConfigMixin and
# PretrainedConfig — verify against the original source.
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
    """Configuration ("maskformer-swin") describing a Swin transformer
    backbone: patching, stage depths/heads, window attention, dropout
    rates, and which stages are exposed as backbone features."""

    __a : Optional[Any] ="""maskformer-swin"""
    # Translate generic config attribute names to Swin-specific ones.
    __a : Optional[int] ={
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    # NOTE(review): all parameters share one obfuscated placeholder name
    # (a SyntaxError as written), and the repeated `lowerCAmelCase = ...`
    # rebindings were originally `self.<name> = <name>` assignments.
    def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
        super().__init__(**UpperCAmelCase_ )
        lowerCAmelCase = image_size
        lowerCAmelCase = patch_size
        lowerCAmelCase = num_channels
        lowerCAmelCase = embed_dim
        lowerCAmelCase = depths
        lowerCAmelCase = len(UpperCAmelCase_ )
        lowerCAmelCase = num_heads
        lowerCAmelCase = window_size
        lowerCAmelCase = mlp_ratio
        lowerCAmelCase = qkv_bias
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = drop_path_rate
        lowerCAmelCase = hidden_act
        lowerCAmelCase = use_absolute_embeddings
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
        # Stage naming plus aligned out_features / out_indices bookkeeping.
        lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
        lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
            out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
1
UpperCAmelCase_ ="""0.18.2""" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, 
KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, 
StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, 
StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
33
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over all contiguous subarrays of ``arr`` (Kadane's algorithm).

    :param arr: sequence of numbers; may be empty.
    :param allow_empty_subarrays: when True, the empty subarray (sum 0) is a valid
        candidate, so the result is never negative.
    :return: the maximum contiguous-subarray sum, or 0 when ``arr`` is empty.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    >>> max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True)
    0
    """
    if not arr:
        return 0
    # With the empty subarray allowed the answer is at least 0; otherwise the best
    # single element can be negative, so start from -inf.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0
    for num in arr:
        # Either extend the running subarray or restart it (at `num`, or at the
        # empty subarray when that is permitted).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


# Backward-compatible alias: the previous (mangled) public name of this function.
UpperCAmelCase = max_subarray_sum

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fixed: the original printed `max_subarray_sum(nums)` but never defined `nums`.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
33
1
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keys that are expected to be absent after loading the decoder state dict.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map one audiocraft (fairseq) state-dict key to its transformers MusicGen name."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict, hidden_size):
    """Rename all keys in ``state_dict``, splitting fused qkv projections.

    Returns a tuple ``(state_dict, enc_dec_proj_state_dict)``: the renamed decoder
    state dict, and the encoder-decoder projection weights pulled out separately.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections.
            # NOTE(review): the assignment targets below were lost in the mangled
            # source and are reconstructed from the upstream conversion script —
            # verify against the original before relying on them.
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint):
    """Build a MusicgenDecoderConfig for the given checkpoint size.

    :param checkpoint: one of ``"small"``, ``"medium"``, ``"large"``.
    :raises ValueError: for any other checkpoint name.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert an audiocraft MusicGen checkpoint to the transformers format.

    :param checkpoint: checkpoint size (``"small"``, ``"medium"``, ``"large"``).
    :param pytorch_dump_folder: optional directory to save the converted model to.
    :param repo_id: optional Hub repo to push the converted model to.
    :param device: torch device used to load the fairseq model.
    :raises ValueError: if the converted state dict has missing/unexpected keys or
        the sanity-check forward pass produces logits of the wrong shape.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
    # Fixed: the parsed --device option was previously never forwarded.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
33
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) 
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', 
'''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) 
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def 
__snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = 
JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] 
) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = 
self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) 
class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
1
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# NOTE(review): the original fragment was machine-scrambled (duplicate
# `__init__` parameter names — a SyntaxError — every method named
# `__snake_case`, and all locals bound to one reused name while later code
# read `self.encoder`, `merges`, `logger`, ...). Names below are restored
# from those reads; behavior follows the upstream BlenderbotSmallTokenizer.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class __UpperCamelCase(PreTrainedTokenizer):
    """Byte-pair-encoding tokenizer for the Blenderbot-small (90M) checkpoint.

    Vocabulary comes from a JSON `vocab_file`; merge ranks come from a
    `merges_file` whose first line is a version header.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is the "#version: ..." header; last split entry is empty.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}  # memoises bpe() results per whitespace token

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply punctuation splitting, lowercasing and BPE merges to `token`.

        Returns the BPE segments joined by spaces, with "@@" continuation
        markers on non-final segments.
        """
        if token in self.cache:
            return self.cache[token]
        # Separate punctuation and apostrophes, collapse runs of whitespace.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the lowest-ranked (earliest learned) bigram first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]  # drop the trailing "</w>"

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split `text` on whitespace and BPE-encode each piece."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a (lowercased) token string to its vocabulary id."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map a vocabulary id back to its token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens and undo the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`.

        Returns the two file paths, or None (after logging) if the target
        is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # fix: the scrambled source had `lambda UpperCAmelCase_: kv[1]`,
            # which reads the undefined name `kv` — sort by merge rank.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
33
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)

# NOTE(review): identifiers in this fragment appear machine-scrambled — the
# three constants below are all bound to the same name, and method bodies
# assign to `lowerCAmelCase` while reading `UpperCAmelCase_`. Comments
# describe the apparent intent; confirm against the upstream hub-utils tests.
UpperCAmelCase_ = """hf-internal-testing/tiny-random-bert"""  # test repo id
UpperCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")  # cache dir
UpperCAmelCase_ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""  # full commit hash


class __UpperCamelCase ( unittest.TestCase ):
    """Network-backed tests for `cached_file` / `get_file_from_repo` / `has_file`."""

    def __snake_case ( self ):
        # Download a file and verify the hub cache layout.
        lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(UpperCAmelCase_ ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
        with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
            lowerCAmelCase = f.read()
        self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
        self.assertTrue(os.path.isfile(UpperCAmelCase_ ) )
        # File is cached at the same place the second time.
        lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
        # Using a specific revision to test the full commit hash.
        lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' )
        self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )

    def __snake_case ( self ):
        # Invalid repo id, invalid revision, and missing file must all raise.
        with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
            lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ )
        with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
            lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' )
        with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
            lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )

    def __snake_case ( self ):
        # A missing entry gets recorded under `.no_exist`; the soft-failure
        # flags then turn the error into a None return instead of raising.
        with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
            lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
        with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
            lowerCAmelCase = f.read()
        self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) )
        lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
        self.assertIsNone(UpperCAmelCase_ )
        lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
        self.assertIsNone(UpperCAmelCase_ )
        # Build a fake response object so the HTTP HEAD fails with a 500.
        lowerCAmelCase = mock.Mock()
        lowerCAmelCase = 5_00
        lowerCAmelCase = {}
        lowerCAmelCase = HTTPError
        lowerCAmelCase = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head:
            lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ )
            self.assertIsNone(UpperCAmelCase_ )
            # This check we did call the fake head request
            mock_head.assert_called()

    def __snake_case ( self ):
        # `has_file` on a PyTorch-only repo: presumably True for WEIGHTS_NAME
        # and False for the TF2/Flax weight names — the scrambled arguments
        # no longer distinguish them; confirm against upstream.
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )

    def __snake_case ( self ):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' )
        lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_68 )

    def __snake_case ( self ):
        # Local-directory variant: a present file returns its path, an absent one None.
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) )
            self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
1
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline

# NOTE(review): identifiers in this fragment appear machine-scrambled — the
# logger below and all dataclass fields are bound to reused placeholder names,
# and method bodies assign to `lowerCAmelCase` while reading descriptive names
# (`dataset`, `keys`, `batch`, `block_size`, `batch_idx`, `logger`, ...).
# Comments describe the apparent intent; confirm against the upstream
# `datasets` JSON packaged module.
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader (features, encoding, field, chunksize, ...)."""

    # NOTE(review): all fields share the scrambled name `__a`, so only the last
    # binding survives as written; the annotations still show the intended set.
    __a : Optional[datasets.Features] =None
    __a : str ="utf-8"
    __a : Optional[str] =None
    __a : Optional[str] =None
    __a : bool =True  # deprecated
    __a : Optional[int] =None  # deprecated
    __a : int =1_0 << 2_0  # 10MB
    __a : Optional[bool] =None


class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that parses JSON / JSON-lines files into pa.Tables."""

    # NOTE(review): reads `JsonConfig`, which is not defined under that name in
    # this scrambled fragment (presumably the dataclass above).
    __a : str =JsonConfig

    def __snake_case ( self ):
        # _info: validate deprecated config options and expose the features.
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            lowerCAmelCase = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    def __snake_case ( self , UpperCAmelCase_ ):
        # _split_generators: download data files and emit one SplitGenerator
        # per split (or a single TRAIN split for a bare file list).
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
            lowerCAmelCase = data_files
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        lowerCAmelCase = []
        for split_name, files in data_files.items():
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowerCAmelCase = [files]
            lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
        return splits

    def __snake_case ( self , UpperCAmelCase_ ):
        # _cast_table: add missing feature columns as nulls, then cast the
        # table to the configured schema.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
                lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
        return pa_table

    def __snake_case ( self , UpperCAmelCase_ ):
        # _generate_tables: yield (key, pa.Table) pairs for every file.
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                # We keep only the field we are interested in
                lowerCAmelCase = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCAmelCase_ , (list, tuple) ):
                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                else:
                    lowerCAmelCase = dataset
                lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                yield file_idx, self._cast_table(UpperCAmelCase_ )
            # If the file has one json object per line
            else:
                with open(UpperCAmelCase_ , '''rb''' ) as f:
                    lowerCAmelCase = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
                    lowerCAmelCase = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        lowerCAmelCase = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCAmelCase_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    lowerCAmelCase = paj.read_json(
                                        io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
                                        and "straddling" not in str(UpperCAmelCase_ )
                                        or block_size > len(UpperCAmelCase_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    lowerCAmelCase = json.load(UpperCAmelCase_ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):  # list is the only sequence type supported in JSON
                                try:
                                    lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
                                    lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
                                    lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(UpperCAmelCase_ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
                        batch_idx += 1
33
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


# NOTE(review): identifiers appear machine-scrambled — every parameter is named
# `UpperCAmelCase_` (duplicate parameter names are a SyntaxError as written) and
# locals are assigned to `lowerCAmelCase` while later code reads
# `self._load_from_cache_file` / `self._file_format` / `self.builder`.
# Confirm against the upstream `datasets` SparkDatasetReader.
class __UpperCamelCase ( __UpperCAmelCase ):
    """Reader that materialises a Dataset from a Spark DataFrame via the Spark packaged module."""

    def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ):
        # Forward the generic reader options to the base class, then build the
        # Spark-backed dataset builder.
        super().__init__(
            split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
        lowerCAmelCase = load_from_cache_file
        lowerCAmelCase = file_format
        lowerCAmelCase = Spark(
            df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )

    def __snake_case ( self ):
        # read(): streaming mode skips download_and_prepare entirely.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # Force a re-download when the cache must not be reused.
        lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=UpperCAmelCase_ , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
33
1
from cva import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Invert every pixel of *img* in place and return it.

    :param img: image as a numpy-style array of BGR pixels (as returned by
        ``imread``) — assumed shape (height, width, 3); TODO confirm.
    :return: the same array with each channel value replaced by 255 - value
    """
    # getting number of pixels in the image
    rows, cols = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(rows):
        for j in range(cols):
            # fix: the scrambled source computed the negative but never wrote
            # it back; also the parameter itself is used instead of the
            # undefined name `img` it previously read.
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
33
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits=3):
    """Build and simulate a quantum Fourier transform circuit.

    :param number_of_qubits: number of qubits (1..10)
    :return: measurement counts from a 10000-shot qasm simulation
    :raises TypeError: if number_of_qubits is a string
    :raises ValueError: if number_of_qubits is <= 0, non-integral, or > 10
    """
    # fix: the scrambled source called `isinstance(x, x)`, which raises a
    # TypeError at runtime for any integer input; the intended guard rejects
    # string arguments.
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then controlled phase
        # rotations of pi/2^(k) against the lower qubits.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order to finish the QFT.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
33
1
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down) 0/1 knapsack.

    Uses the global memo table ``f`` (rows: items considered, cols: capacity),
    initialised with -1 for "not computed yet".

    :param i: number of items considered so far
    :param wt: item weights
    :param val: item values
    :param j: remaining capacity
    :return: best value achievable with the first i items and capacity j
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # not memoised yet
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        # fix: the scrambled source dropped this store, defeating memoisation
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    :param w: total capacity
    :param wt: item weights
    :param val: item values
    :param n: number of items
    :return: (optimal value, full DP table)
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of items.

    :param w: total capacity
    :param wt: item weights (list/tuple of ints)
    :param val: item values (list/tuple)
    :return: (optimal value, set of chosen 1-based item indices)
    :raises ValueError: if wt/val are not sequences or differ in length
    :raises TypeError: if any weight is not an integer
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Recursively walk the DP table to recover one optimal item subset.

    For the current item i at a maximum weight j to be part of an optimal
    subset, the optimal value at (i, j) must be greater than the optimal
    value at (i-1, j), where i - 1 means considering only the previous
    items at the given maximum weight.
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # item i is not needed for this optimum
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # item i belongs to the optimum: take it and reduce the capacity
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


# NOTE(review): identifiers in this fragment appear machine-scrambled — the
# `__init__` signature repeats the parameter name `UpperCAmelCase_` (a
# SyntaxError as written) and locals are assigned to `lowerCAmelCase` while
# later code reads descriptive names (`self.timesteps`, `x`, `t`, `score`,
# `std`, `beta_t`, `drift`, `diffusion`, ...). Comments describe the apparent
# intent; confirm against the upstream diffusers ScoreSdeVpScheduler.
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
    """Continuous-time score-based SDE scheduler (variance-preserving flavour)."""

    # solver order, read by pipelines
    __a : Any =1

    @register_to_config
    def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ):
        # Presumably: num_train_timesteps, beta_min, beta_max, sampling_eps —
        # `register_to_config` stores them on `self.config`. TODO confirm.
        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
        # set_timesteps: a linear grid from 1 down to `sampling_eps` on the
        # requested device.
        lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ )

    def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
        # step_pred: one reverse-SDE step on sample `x` at time `t` given the
        # model `score`. Requires `set_timesteps` to have run first.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        lowerCAmelCase = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        lowerCAmelCase = std.flatten()
        # Broadcast std to the score's rank before dividing.
        while len(std.shape ) < len(score.shape ):
            lowerCAmelCase = std.unsqueeze(-1 )
        lowerCAmelCase = -score / std

        # compute
        lowerCAmelCase = -1.0 / len(self.timesteps )
        lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        lowerCAmelCase = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            lowerCAmelCase = beta_t.unsqueeze(-1 )
        lowerCAmelCase = -0.5 * beta_t * x

        lowerCAmelCase = torch.sqrt(UpperCAmelCase_ )
        lowerCAmelCase = drift - diffusion**2 * score
        lowerCAmelCase = x + drift * dt

        # add noise
        lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype )
        lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ):
        # Scheduler length == configured number of training timesteps.
        return self.config.num_train_timesteps
33
1
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original OpenAI-GPT TF checkpoint to a PyTorch model dump.

    fix: the scrambled source declared all three parameters as `_snake_case`
    (duplicate parameter names — a SyntaxError) and was named `UpperCAmelCase`
    while the argparse entry point called this name.

    :param openai_checkpoint_folder_path: path to the TF checkpoint folder
    :param openai_config_file: optional JSON config path ("" -> default config)
    :param pytorch_dump_folder_path: output folder for weights + config
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="""Path to the TensorFlow checkpoint path.""",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
33
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """yaml.SafeLoader that raises on mappings with duplicate keys.

    FIX: the original mangled source named this class ``__UpperCamelCase`` while
    ``from_yaml_string`` referenced the undefined ``_NoDuplicateSafeLoader``.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # lists are unhashable; convert them to tuples before counting
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml front-matter, body); yaml part is None if absent."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dataset card metadata as a dict, round-trippable to README YAML front-matter.

    FIX: restored the class name (the ``__main__`` block calls the undefined
    ``DatasetMetadata``) and distinct method names (all were ``__snake_case`` and
    shadowed each other). Base class assumed to be ``dict`` — the body relies on
    ``self.items()`` and ``cls(**metadata_dict)``.
    """

    # Fields whose YAML spelling uses dashes instead of underscores
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block of a README file (empty if no block)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write (or rewrite) the YAML block of the README at *path*."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        # NOTE(review): the mangled source lost these keyword literals;
        # sort_keys=False / allow_unicode=True per the datasets repo — confirm.
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# NOTE(review): original variable name lost in mangling; this maps task
# categories to their (here empty) task-id lists.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
33
1
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def UpperCAmelCase ( _snake_case = "" ): lowerCAmelCase = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' lowerCAmelCase = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' ) lowerCAmelCase = soup.find_all('''td''' , attrs='''titleColumn''' ) lowerCAmelCase = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(_snake_case , _snake_case ) } def UpperCAmelCase ( _snake_case = "IMDb_Top_250_Movies.csv" ): lowerCAmelCase = get_imdb_top_aaa_movies() with open(_snake_case , '''w''' , newline='''''' ) as out_file: lowerCAmelCase = csv.writer(_snake_case ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
33
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase(unittest.TestCase):
    """Tests for the video-classification pipeline.

    FIX: the mangled source named all four methods ``__snake_case`` so only the
    last survived, and gave them identically-named parameters (a SyntaxError).
    Distinct, conventional names are restored; exact original identifiers are a
    best-effort reconstruction — confirm against the transformers test suite.
    """

    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        # Each example must yield exactly top_k=2 (score, label) dicts.
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        # Batched input: one result list per video.
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF implementation of this pipeline to test yet.
        pass
33
1
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# NOTE(review): original map variable name lost in mangling; named per
# transformers convention — confirm.
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for UniSpeech models.

    FIX: the mangled source declared every ``__init__`` parameter as
    ``UpperCAmelCase_`` (duplicate parameter names — a SyntaxError) and assigned
    each value to a throwaway local instead of ``self.<attr>``, so no config
    attribute was ever set. Parameter names are reconstructed from the
    attribute usages in the body and the transformers UniSpeechConfig API.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Product of the conv strides = total downsampling factor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
33
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImgaImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin

if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the ONNX Stable Diffusion img2img pipeline.

    FIX: the mangled source named both test classes ``__UpperCamelCase`` (the
    second shadowed the first) and every test method ``__snake_case`` (only the
    last survived unittest discovery). Distinct names restored; exact original
    identifiers are a best-effort reconstruction — confirm against diffusers.
    """

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against the full-size ONNX checkpoints."""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the mangled source lost the attribute name being set to
        # False here; enable_mem_pattern per the diffusers repo — confirm.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
33
1
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """yaml.SafeLoader that raises on mappings with duplicate keys.

    FIX: the original mangled source named this class ``__UpperCamelCase`` while
    ``from_yaml_string`` referenced the undefined ``_NoDuplicateSafeLoader``.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # lists are unhashable; convert them to tuples before counting
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml front-matter, body); yaml part is None if absent."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dataset card metadata as a dict, round-trippable to README YAML front-matter.

    FIX: restored the class name (the ``__main__`` block calls the undefined
    ``DatasetMetadata``) and distinct method names (all were ``__snake_case`` and
    shadowed each other). Base class assumed to be ``dict`` — the body relies on
    ``self.items()`` and ``cls(**metadata_dict)``.
    """

    # Fields whose YAML spelling uses dashes instead of underscores
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block of a README file (empty if no block)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write (or rewrite) the YAML block of the README at *path*."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        # NOTE(review): the mangled source lost these keyword literals;
        # sort_keys=False / allow_unicode=True per the datasets repo — confirm.
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# NOTE(review): original variable name lost in mangling; this maps task
# categories to their (here empty) task-id lists.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
33
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Binarize a fine-pruned checkpoint into a standard dense state dict.

    FIX: the mangled source defined this as ``UpperCAmelCase(_snake_case)`` while
    the ``__main__`` block called the undefined ``main(args)``; the output state
    dict was also lost to a throwaway ``lowerCAmelCase`` local. Names restored.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These layers are never pruned; copy through unchanged.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # name ends with "weight" (6 chars); its scores live at
                # "<prefix>mask_scores".
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # NOTE(review): third argument (sigmoid=True) reconstructed from
                # the movement-pruning example — confirm.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch-and-clamp (L0 regularization)
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
33
1
from __future__ import annotations


def UpperCAmelCase(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve the shear-stress relation ``tau = F / A`` for the missing quantity.

    Exactly one of the three arguments must be 0; that one is computed from the
    other two and returned as ``(name, value)``.

    FIX: the mangled source declared all three parameters as ``_snake_case``
    (duplicate parameter names — a SyntaxError) while the body referenced
    ``stress``, ``tangential_force`` and ``area``; the parameter names are
    restored. The function name is kept for interface compatibility.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class __UpperCamelCase(PreTrainedTokenizer):
    """PhoBERT tokenizer: word-level BPE for (pre-segmented) Vietnamese text.

    Uses a plain-text vocabulary (``vocab.txt``) plus fairseq-style BPE merge
    codes (``bpe.codes``); special-token ids 0-3 are reserved for
    bos/pad/eos/unk in that order.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # Special tokens occupy the first four vocabulary slots (fairseq layout).
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        # Drop the trailing frequency column of each merge rule.
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_a_pair: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a,
                token_ids_1=token_ids_a_pair,
                already_has_special_tokens=True,
            )

        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """All-zero token-type ids (PhoBERT does not use segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    @property
    def vocab_size(self):
        # Size of the base vocabulary (without added tokens).
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single whitespace token; result is space-joined
        subwords with ``@@`` continuation markers."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # End-of-word marker on the last symbol, fairseq-style.
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        # Strip the trailing "</w>" marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each token."""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join subwords and remove the ``@@ `` continuation markers."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the vocab and merges files into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq ``<token> <count>`` vocabulary file (or open handle)
        into ``self.encoder``, assigning ids in file order."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ')
            if idx == -1:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'')
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
33
1
import functools


def UpperCAmelCase(days, costs):
    """Return the minimum cost of travel passes covering every day in ``days``.

    ``costs`` holds the prices of a 1-day, 7-day and 30-day pass, in that
    order; travel days are numbered 1..365 (LeetCode 983, "Minimum Cost For
    Tickets").

    Raises:
        ValueError: if ``days`` is not a list of ints, ``costs`` is not a list
            of exactly three ints, or any day falls outside 1..365.
    """
    # --- validation -------------------------------------------------------
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost to cover all travel days from `index` through 365.
        if index > 365:
            return 0

        if index not in days_set:
            # No travel on this day: move on for free.
            return dynamic_programming(index + 1)

        # Travel day: buy the cheapest of the three pass options.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """A single element of a disjoint-set (union-find) forest."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # every node starts as its own root
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set forest with union-by-rank and path compression."""

    def __init__(self) -> None:
        # map from element value to its node in the forest
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the root of the set containing ``data`` (path compression)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(
        self, nodea: DisjointSetTreeNode[T], nodea_other: DisjointSetTreeNode[T]
    ) -> None:
        """Attach the lower-rank root under the higher-rank root."""
        if nodea.rank > nodea_other.rank:
            nodea_other.parent = nodea
        else:
            nodea.parent = nodea_other
            if nodea.rank == nodea_other.rank:
                nodea_other.rank += 1

    def union(self, dataa: T, dataa_other: T) -> None:
        """Merge the two sets containing the given elements."""
        self.link(self.find_set(dataa), self.find_set(dataa_other))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a nested adjacency dict."""

    def __init__(self) -> None:
        # node -> {neighbour: edge weight}
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Add ``node`` to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, nodea: T, nodea_other: T, weight: int) -> None:
        """Add an undirected edge with the given weight (nodes auto-created)."""
        self.add_node(nodea)
        self.add_node(nodea_other)
        self.connections[nodea][nodea_other] = weight
        self.connections[nodea_other][nodea] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return a minimum spanning tree built with Kruskal's algorithm.

        NOTE(review): assumes the graph is connected — the loop runs until
        |V| - 1 edges are accepted and would exhaust ``edges`` otherwise.
        """
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])  # ascending by weight

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily accept edges that join different components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
33
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class __UpperCamelCase(TaskTemplate):
    """Task template describing a language-modeling dataset.

    The input schema is a single string column; there is no separate label
    schema (the text itself is the training signal).
    """

    # Template identifier; kept in asdict() output even when left at default.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Map the dataset's text column name to the canonical "text" key.
        return {self.text_column: "text"}
33
def combination_sum_iv(n, array, target):
    """Count ordered sequences of elements of ``array`` summing to ``target``.

    Plain exponential recursion (no memoisation).  ``n`` is accepted for
    interface parity with the other implementations but is not used here.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    """Same count as ``combination_sum_iv`` but memoised top-down.

    ``dp_array[t]`` caches the number of combinations summing to ``t``
    (-1 means "not computed yet").
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    """Same count, computed bottom-up; ``n`` must equal ``len(array)``."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach sum 0: the empty sequence

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
33
1
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with slowsort.

    Slowsort is the deliberately inefficient "multiply and surrender"
    algorithm — included for educational purposes only.  Defaults sort the
    whole list.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    # Recursively sort both halves ...
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # ... move the larger of the two sub-maxima to the last position ...
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    # ... then sort everything except that final maximum.
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
33
import torch
from diffusers import StableDiffusionPipeline

# Local folder (or Hub id) containing the previously fine-tuned weights.
model_id = "path-to-your-trained-model"

# NOTE(review): the dtype was garbled to `torch.floataa` in the source; half
# precision (`float16`) is the conventional choice for CUDA inference with
# this pipeline — confirm against the original training setup.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
33
1