code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __A : """simple docstring""" def __init__( self ) -> Tuple: a ={} def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=1 ) -> Union[str, Any]: if self.graph.get(__A ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: a =[[w, v]] if not self.graph.get(__A ): a =[] def SCREAMING_SNAKE_CASE ( self ) -> int: return list(self.graph ) def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Optional[Any]: if self.graph.get(__A ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__A ) def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> List[Any]: if s == d: return [] a =[] a =[] if s == -2: a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__A ) return visited else: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =ss # check if se have reached the starting point if len(__A ) == 0: return visited def SCREAMING_SNAKE_CASE ( self , __A=-1 ) -> int: if c == -1: a =floor(random() * 1_0000 ) + 10 for i in range(__A ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): a =floor(random() * c ) + 1 if n != i: self.add_pair(__A , __A , 1 ) def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> List[str]: a =deque() a =[] if s == -2: a =list(self.graph )[0] d.append(__A ) visited.append(__A ) while d: a =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]: a =0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count 
+= 1 return count def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple: return len(self.graph[u] ) def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> List[Any]: a =[] a =[] if s == -2: a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =s a =[] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =ss # check if se have reached the starting point if len(__A ) == 0: return sorted_nodes def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return list(__A ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not 
on_the_way_back ): a =len(__A ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return False def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> List[str]: a =time() self.dfs(__A , __A ) a =time() return end - begin def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> int: a =time() self.bfs(__A ) a =time() return end - begin class __A : """simple docstring""" def __init__( self ) -> List[str]: a ={} def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=1 ) -> Dict: # check if the u exists if self.graph.get(__A ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist a =[[w, v]] # add the other way if self.graph.get(__A ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist a =[[w, u]] def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Any: if self.graph.get(__A ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__A ) # the other way round if self.graph.get(__A ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__A ) def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> int: if s == d: return [] a =[] a =[] if s == -2: a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__A ) return visited else: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s 
== ss: stack.pop() if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =ss # check if se have reached the starting point if len(__A ) == 0: return visited def SCREAMING_SNAKE_CASE ( self , __A=-1 ) -> List[Any]: if c == -1: a =floor(random() * 1_0000 ) + 10 for i in range(__A ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): a =floor(random() * c ) + 1 if n != i: self.add_pair(__A , __A , 1 ) def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> str: a =deque() a =[] if s == -2: a =list(self.graph )[0] d.append(__A ) visited.append(__A ) while d: a =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE ( self , __A ) -> str: return len(self.graph[u] ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return list(__A ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in 
self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return False def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return list(self.graph ) def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> Optional[int]: a =time() self.dfs(__A , __A ) a =time() return end - begin def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> Dict: a =time() self.bfs(__A ) a =time() return end - begin
81
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler A__ = 16 A__ = 32 def _UpperCAmelCase ( snake_case , snake_case = 16 , snake_case = "bert-base-cased" ): """simple docstring""" _lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case ) _lowerCAmelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case , max_length=snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCAmelCase = datasets.map( snake_case , batched=snake_case , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return tokenizer.pad(snake_case , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
_lowerCAmelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) _lowerCAmelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) return train_dataloader, eval_dataloader def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case ): """simple docstring""" model.eval() _lowerCAmelCase = 0 for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase = model(**snake_case ) _lowerCAmelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCAmelCase , _lowerCAmelCase = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case ) - 1: _lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case , references=snake_case , ) _lowerCAmelCase = metric.compute() return eval_metric["accuracy"] def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase = config["""lr"""] _lowerCAmelCase = int(config["""num_epochs"""] ) _lowerCAmelCase = int(config["""seed"""] ) _lowerCAmelCase = int(config["""batch_size"""] ) _lowerCAmelCase = args.model_name_or_path set_seed(snake_case ) _lowerCAmelCase , _lowerCAmelCase = get_dataloaders(snake_case , snake_case , snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase = 
AutoModelForSequenceClassification.from_pretrained(snake_case , return_dict=snake_case ) # Instantiate optimizer _lowerCAmelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=snake_case ) if accelerator.state.deepspeed_plugin is not None: _lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: _lowerCAmelCase = 1 _lowerCAmelCase = (len(snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=snake_case , num_warmup_steps=0 , num_training_steps=snake_case , ) else: _lowerCAmelCase = DummyScheduler(snake_case , total_num_steps=snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare( snake_case , snake_case , snake_case , snake_case , snake_case ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase = 0 # We also need to keep track of the stating epoch so files are named properly _lowerCAmelCase = 0 _lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" ) _lowerCAmelCase = num_epochs if args.partial_train_epoch is not None: _lowerCAmelCase = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) _lowerCAmelCase = args.resume_from_checkpoint.split("""epoch_""" )[1] _lowerCAmelCase = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break _lowerCAmelCase = int(snake_case ) + 1 _lowerCAmelCase = evaluation_loop(snake_case , snake_case , snake_case , snake_case ) accelerator.print("""resumed checkpoint performance:""" , snake_case ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f: _lowerCAmelCase = json.load(snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model _lowerCAmelCase = {} for epoch in range(snake_case , snake_case ): model.train() for step, batch in enumerate(snake_case ): _lowerCAmelCase = model(**snake_case ) _lowerCAmelCase = 
outputs.loss _lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 _lowerCAmelCase = F'epoch_{epoch}' _lowerCAmelCase = os.path.join(args.output_dir , snake_case ) accelerator.save_state(snake_case ) _lowerCAmelCase = evaluation_loop(snake_case , snake_case , snake_case , snake_case ) _lowerCAmelCase = accuracy _lowerCAmelCase = lr_scheduler.get_lr()[0] _lowerCAmelCase = optimizer.param_groups[0]["""lr"""] _lowerCAmelCase = epoch _lowerCAmelCase = overall_step accelerator.print(F'epoch {epoch}:' , snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f: json.dump(snake_case , snake_case ) def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=snake_case , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case , ) parser.add_argument( """--output_dir""" , type=snake_case , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=snake_case , default=snake_case , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=snake_case , default=snake_case , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=snake_case , default=2 , help="""Number of train epochs.""" , ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(snake_case , snake_case ) if __name__ == "__main__": main()
82
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case_ : Union[str, Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[str] = ['XLNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = ['XLNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Union[str, Any] = [ 'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLNetForMultipleChoice', 'XLNetForQuestionAnswering', 'XLNetForQuestionAnsweringSimple', 'XLNetForSequenceClassification', 'XLNetForTokenClassification', 'XLNetLMHeadModel', 'XLNetModel', 'XLNetPreTrainedModel', 'load_tf_weights_in_xlnet', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ 'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLNetForMultipleChoice', 'TFXLNetForQuestionAnsweringSimple', 'TFXLNetForSequenceClassification', 'TFXLNetForTokenClassification', 'TFXLNetLMHeadModel', 'TFXLNetMainLayer', 'TFXLNetModel', 'TFXLNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if 
not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Dict = os.path.abspath(lowercase__ ) logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model lowerCAmelCase_ :Any = tf.train.list_variables(lowercase__ ) lowerCAmelCase_ :List[str] = [] lowerCAmelCase_ :str = [] lowerCAmelCase_ :int = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") lowerCAmelCase_ :Union[str, Any] = full_name.split("""/""" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(f"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' lowerCAmelCase_ :Dict = name[1:] # figure out how many levels deep the name is lowerCAmelCase_ :Tuple = 0 for _name in name: if _name.startswith("""layer_with_weights""" ): depth += 1 else: break layer_depth.append(lowercase__ ) # read data lowerCAmelCase_ :Optional[Any] = tf.train.load_variable(lowercase__ , lowercase__ ) names.append("""/""".join(lowercase__ ) ) arrays.append(lowercase__ ) logger.info(f"""Read a total of {len(lowercase__ ):,} layers""" ) # Sanity check if len(set(lowercase__ ) ) != 1: raise ValueError(f"""Found layer names with different depths (layer depth {list(set(lowercase__ ) )})""" ) lowerCAmelCase_ :Dict = list(set(lowercase__ ) )[0] if layer_depth != 1: raise ValueError( """The model contains more than just the embedding/encoder layers. 
This script does not handle MLM/NSP""" """ heads.""" ) # convert layers logger.info("""Converting weights...""" ) for full_name, array in zip(lowercase__ , lowercase__ ): lowerCAmelCase_ :str = full_name.split("""/""" ) lowerCAmelCase_ :Union[str, Any] = model lowerCAmelCase_ :Tuple = [] for i, m_name in enumerate(lowercase__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("""layer_with_weights""" ): lowerCAmelCase_ :Dict = int(m_name.split("""-""" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["""embeddings""", """LayerNorm"""] ) lowerCAmelCase_ :Dict = getattr(lowercase__ , """embeddings""" ) lowerCAmelCase_ :List[str] = getattr(lowercase__ , """LayerNorm""" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] ) lowerCAmelCase_ :Optional[Any] = getattr(lowercase__ , """encoder""" ) lowerCAmelCase_ :List[str] = getattr(lowercase__ , """layer""" ) lowerCAmelCase_ :Tuple = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["""pooler""", """dense"""] ) lowerCAmelCase_ :Tuple = getattr(lowercase__ , """pooler""" ) lowerCAmelCase_ :List[Any] = getattr(lowercase__ , """dense""" ) elif m_name == "embeddings": trace.append("""embeddings""" ) lowerCAmelCase_ :Dict = getattr(lowercase__ , """embeddings""" ) if layer_num == 0: trace.append("""word_embeddings""" ) lowerCAmelCase_ :Any = getattr(lowercase__ , """word_embeddings""" ) elif layer_num == 1: trace.append("""position_embeddings""" ) lowerCAmelCase_ :int = getattr(lowercase__ , """position_embeddings""" ) elif layer_num == 2: trace.append("""token_type_embeddings""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """token_type_embeddings""" ) else: 
raise ValueError(f"""Unknown embedding layer with name {full_name}""" ) trace.append("""weight""" ) lowerCAmelCase_ :Tuple = getattr(lowercase__ , """weight""" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["""attention""", """self"""] ) lowerCAmelCase_ :str = getattr(lowercase__ , """attention""" ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """self""" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["""attention""", """output""", """LayerNorm"""] ) lowerCAmelCase_ :str = getattr(lowercase__ , """attention""" ) lowerCAmelCase_ :Any = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """LayerNorm""" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["""attention""", """output""", """dense"""] ) lowerCAmelCase_ :Any = getattr(lowercase__ , """attention""" ) lowerCAmelCase_ :List[Any] = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :List[Any] = getattr(lowercase__ , """dense""" ) elif m_name == "_output_dense": # output dense trace.extend(["""output""", """dense"""] ) lowerCAmelCase_ :Any = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """dense""" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["""output""", """LayerNorm"""] ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :Any = getattr(lowercase__ , """LayerNorm""" ) elif m_name == "_key_dense": # attention key trace.append("""key""" ) lowerCAmelCase_ :Union[str, Any] = getattr(lowercase__ , """key""" ) elif m_name == "_query_dense": # attention query trace.append("""query""" ) lowerCAmelCase_ :Union[str, Any] = getattr(lowercase__ , """query""" ) elif m_name == "_value_dense": # attention value trace.append("""value""" ) lowerCAmelCase_ :List[str] = getattr(lowercase__ , """value""" ) elif m_name == "_intermediate_dense": # attention intermediate dense 
trace.extend(["""intermediate""", """dense"""] ) lowerCAmelCase_ :Optional[Any] = getattr(lowercase__ , """intermediate""" ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """dense""" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("""output""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """output""" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("""bias""" ) lowerCAmelCase_ :Union[str, Any] = getattr(lowercase__ , """bias""" ) elif m_name in ["kernel", "gamma"]: trace.append("""weight""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """weight""" ) else: logger.warning(f"""Ignored {m_name}""" ) # for certain layers reshape is necessary lowerCAmelCase_ :Dict = """.""".join(lowercase__ ) if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase__ ) or re.match( r"""(\S+)\.attention\.output\.dense\.weight""" , lowercase__ ): lowerCAmelCase_ :Tuple = array.reshape(pointer.data.shape ) if "kernel" in full_name: lowerCAmelCase_ :Optional[Any] = array.transpose() if pointer.shape == array.shape: lowerCAmelCase_ :List[str] = torch.from_numpy(lowercase__ ) else: raise ValueError( f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" f""" {array.shape}""" ) logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' logger.info(f"""Loading model based on config from {config_path}...""" ) lowerCAmelCase_ :Optional[int] = BertConfig.from_json_file(lowercase__ ) lowerCAmelCase_ :Optional[Any] = BertModel(lowercase__ ) # Load weights from checkpoint logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model logger.info(f"""Saving PyTorch model to 
{pytorch_dump_path}...""" ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) __UpperCAmelCase = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
84
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
'''simple docstring''' def UpperCamelCase_( snake_case : str ): '''simple docstring''' snake_case_ = 0 # if input_string is "aba" than new_input_string become "a|b|a" snake_case_ = "" snake_case_ = "" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(snake_case ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring snake_case_ , snake_case_ = 0, 0 # length[i] shows the length of palindromic substring with center i snake_case_ = [1 for i in range(len(snake_case ) )] # for each character in new_string find corresponding palindromic string snake_case_ = 0 for j in range(len(snake_case ) ): snake_case_ = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(snake_case ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 snake_case_ = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: snake_case_ = j - k + 1 # noqa: E741 snake_case_ = j + k - 1 # update max_length and start position if max_length < length[j]: snake_case_ = length[j] snake_case_ = j # create that string snake_case_ = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
"""simple docstring""" import math def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[Any] = 0 __lowerCAmelCase : List[str] = 0 while num > 0: __lowerCAmelCase : Any = num % 8 __lowerCAmelCase : List[str] = octal + (remainder * math.floor(math.pow(10 , _UpperCamelCase ) )) counter += 1 __lowerCAmelCase : Tuple = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return F"0o{int(_UpperCamelCase )}" def __lowerCAmelCase (): print('\n2 in octal is:' ) print(decimal_to_octal(2 ) ) # = 2 print('\n8 in octal is:' ) print(decimal_to_octal(8 ) ) # = 10 print('\n65 in octal is:' ) print(decimal_to_octal(65 ) ) # = 101 print('\n216 in octal is:' ) print(decimal_to_octal(216 ) ) # = 330 print('\n512 in octal is:' ) print(decimal_to_octal(512 ) ) # = 1000 print('\n' ) if __name__ == "__main__": main()
86
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
UpperCamelCase = [0, 2, 4, 6, 8] UpperCamelCase = [1, 3, 5, 7, 9] def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowercase__ : str = 0 for digit in range(10): lowercase__ : str = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase) return result lowercase__ : Dict = 0 for digita in range(10): lowercase__ : int = digita if (remainder + digita) % 2 == 0: lowercase__ : Optional[Any] = ODD_DIGITS else: lowercase__ : str = EVEN_DIGITS for digita in other_parity_digits: lowercase__ : List[str] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , ) return result def lowercase_ ( _lowerCamelCase : int = 9): lowercase__ : Tuple = 0 for length in range(1 , max_power + 1): result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase) return result if __name__ == "__main__": print(f"{solution() = }")
87
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
def a__ ( A_ ): '''simple docstring''' __magic_name__ = [int(A_ ) for i in ip_va_address.split(""".""" ) if i.isdigit()] return len(A_ ) == 4 and all(0 <= int(A_ ) <= 254 for octet in octets ) if __name__ == "__main__": __lowerCAmelCase : List[str] = input().strip() __lowerCAmelCase : int = 'valid' if is_ip_va_address_valid(ip) else 'invalid' print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
88
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for EnCodec: pads/truncates raw audio into fixed-size chunks
    and returns `input_values` plus a `padding_mask`."""

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when no chunking is configured."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunks in samples (>= 1), or None when no chunking is configured."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """Featurize `raw_audio` (mono or stereo, single example or batch).

        Raises ValueError on a sampling-rate mismatch, on setting both padding and
        truncation, or on inputs whose channel count contradicts `feature_size`.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # truncate to the largest whole number of chunks that fits the shortest example
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # pad the longest example up to a whole number of chunks
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                # add a trailing channel axis so every example is (length, channels)
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
89
"""Convert Audio Spectrogram Transformer (AST) checkpoints from the original
repository (YuanGongND/ast) to the Hugging Face Transformers format.

NOTE(review): this block was recovered from name-mangled code — every function
had been renamed to `lowercase` and every assignment target to `a`, which made
the script raise NameError on first call. Names below are restored from the
surviving call sites; verify against the upstream conversion script.
"""

import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name: str) -> ASTConfig:
    """Build an ASTConfig matching *model_name* (patch strides, label maps)."""
    config = ASTConfig()

    if "10-10" in model_name:
        # 10x10 strides are the ASTConfig defaults — nothing to change.
        pass
    elif "speech-commands" in model_name:
        # speech-commands clips are 1 s -> only 128 time frames.
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name: str) -> str:
    """Map one original AST parameter name onto its Transformers equivalent."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    # "attn.proj" must be handled before the generic "attn" substitution below.
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name


def convert_state_dict(orig_state_dict: dict, config: ASTConfig) -> dict:
    """Rename all keys of *orig_state_dict* in place, splitting fused qkv
    projections into separate query/key/value tensors.

    NOTE(review): the q/k/v destination keys were lost in the mangling; they
    are reconstructed here from the standard conversion-script layout —
    confirm against the upstream script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # e.g. "module.v.blocks.3.attn.qkv.weight" -> layer index at split position 3
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict: dict) -> None:
    """Drop the original (distilled) classification heads, which have no
    counterpart in the Transformers model."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download the original checkpoint for *model_name*, convert it, verify
    its logits on a sample clip, and optionally save / push the result.

    Raises ValueError if the model name is unknown or the logits don't match.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1_024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
from math import sqrt def lowerCamelCase_ ( UpperCamelCase__ : int ) -> bool: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' must been an int and positive" __lowerCamelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCamelCase = False for divisor in range(2 , int(round(sqrt(UpperCamelCase__ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCamelCase = False break # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'status' must been from type bool" return status def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Tuple: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCamelCase = list(range(2 , n + 1 ) ) __lowerCamelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(UpperCamelCase__ ) ): for j in range(i + 1 , len(UpperCamelCase__ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCamelCase = 0 # filters actual prime numbers. 
__lowerCamelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list" return ans def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[str]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2" __lowerCamelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(UpperCamelCase__ ): ans.append(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list" return ans def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> int: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number >= 0, "'number' must been an int and >= 0" __lowerCamelCase = [] # this list will be returns of the function. # potential prime number factors. __lowerCamelCase = 2 __lowerCamelCase = number if number == 0 or number == 1: ans.append(UpperCamelCase__ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(UpperCamelCase__ ): while quotient != 1: if is_prime(UpperCamelCase__ ) and (quotient % factor == 0): ans.append(UpperCamelCase__ ) quotient /= factor else: factor += 1 else: ans.append(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list" return ans def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Optional[int]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCamelCase = 0 # prime factorization of 'number' __lowerCamelCase = prime_factorization(UpperCamelCase__ ) __lowerCamelCase = max(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int" return ans def 
lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCamelCase = 0 # prime factorization of 'number' __lowerCamelCase = prime_factorization(UpperCamelCase__ ) __lowerCamelCase = min(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int" return ans def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[int]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int" assert isinstance(number % 2 == 0 , UpperCamelCase__ ), "compare bust been from type bool" return number % 2 == 0 def lowerCamelCase_ ( UpperCamelCase__ : str ) -> str: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int" assert isinstance(number % 2 != 0 , UpperCamelCase__ ), "compare bust been from type bool" return number % 2 != 0 def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (number > 2) and is_even(UpperCamelCase__ ) ), "'number' must been an int, even and > 2" __lowerCamelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCamelCase = get_prime_numbers(UpperCamelCase__ ) __lowerCamelCase = len(UpperCamelCase__ ) # run variable for while-loops. __lowerCamelCase = 0 __lowerCamelCase = None # exit variable. 
for break up the loops __lowerCamelCase = True while i < len_pn and loop: __lowerCamelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCamelCase = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (len(UpperCamelCase__ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Optional[Any]: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCamelCase = 0 while numbera != 0: __lowerCamelCase = numbera % numbera __lowerCamelCase = numbera __lowerCamelCase = rest # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> int: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __lowerCamelCase = 1 # actual answer that will be return. 
# for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCamelCase = prime_factorization(UpperCamelCase__ ) __lowerCamelCase = prime_factorization(UpperCamelCase__ ) elif numbera == 1 or numbera == 1: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = max(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) for _ in range(max(UpperCamelCase__ , UpperCamelCase__ ) ): ans *= n else: __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ): ans *= n done.append(UpperCamelCase__ ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ): ans *= n done.append(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> str: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'number' must been a positive int" __lowerCamelCase = 0 __lowerCamelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. 
while not is_prime(UpperCamelCase__ ): ans += 1 # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and is_prime( UpperCamelCase__ ), "'ans' must been a prime number and from type int" return ans def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> Tuple: """simple docstring""" assert ( is_prime(UpperCamelCase__ ) and is_prime(UpperCamelCase__ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCamelCase = p_number_a + 1 # jump to the next number __lowerCamelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(UpperCamelCase__ ): number += 1 while number < p_number_a: ans.append(UpperCamelCase__ ) number += 1 # fetch the next prime number. while not is_prime(UpperCamelCase__ ): number += 1 # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ans[0] != p_number_a and ans[len(UpperCamelCase__ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 1), "'n' must been int and >= 1" __lowerCamelCase = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(UpperCamelCase__ ) # precondition assert ans[0] == 1 and ans[len(UpperCamelCase__ ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Optional[int]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCamelCase = get_divisors(UpperCamelCase__ ) # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (divisors[0] == 1) and (divisors[len(UpperCamelCase__ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> List[Any]: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCamelCase = gcd(abs(UpperCamelCase__ ) , abs(UpperCamelCase__ ) ) # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been a int and >= 0" __lowerCamelCase = 1 # this will be return. 
for factor in range(1 , n + 1 ): ans *= factor return ans def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0" __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 1 # this will be return for _ in range(n - 1 ): __lowerCamelCase = ans ans += fiba __lowerCamelCase = tmp return ans
90
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
"""Unit tests for the diffusers PNDMScheduler.

NOTE(review): restored from mangled code — the base class had been renamed to
an undefined `UpperCAmelCase__` (the `SchedulerCommonTest` import was unused),
and every method shared the name `_SCREAMING_SNAKE_CASE`, so later defs
silently shadowed earlier ones. Helper/attribute names are recovered from the
surviving call sites (`self.check_over_configs`, `self.check_over_forward`,
`self.full_loop`, `self.get_scheduler_config`, `self.scheduler_classes`,
`self.forward_default_kwargs`); test method names follow the standard
diffusers scheduler-test layout.
"""

import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and assert prk/plms steps are unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # covered by check_over_configs / check_over_forward above
        pass

    def test_reset_timesteps(self, time_step=0, **forward_kwargs):
        # alias kept minimal; real forward checks live in check_over_forward
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Like check_over_configs, but varies the forward kwargs instead."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run the full prk + plms denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier step counts must work even when not a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
91
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
0
# NOTE(review): Flax BERT model tester plus the Flax model test suite.
# Machine renaming broke this module: both classes are named `a__` (the second
# definition silently replaces the first), the suite instantiates
# `FlaxBertModelTester` which is never defined under that name, and
# `all_model_classes` lists FlaxBertForQuestionAnswering twice — presumably
# one entry replaced a different class.  Verify against the upstream
# transformers `test_modeling_flax_bert.py` before use.  Only comments are
# added below; code is byte-identical.
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class a__ ( unittest.TestCase ): def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=4 , ): """simple docstring""" __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_attention_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_choices def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_attention_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = BertConfig( vocab_size=self.vocab_size ,
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = True __lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class a__ ( snake_case__ , unittest.TestCase ): _a : List[str] = True _a : List[str] = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = FlaxBertModelTester(self ) @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = FlaxBertModel.from_pretrained("bert-base-cased" ) __lowerCAmelCase =
model(np.ones((1, 1) ) ) self.assertIsNotNone(_A )
92
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
0
"""Solve a system of n linear equations, given as n rows of n+1 numbers
(n coefficients followed by the constant term), by repeated elimination
and back-substitution."""


def simplify(current_set: list[list]) -> list[list]:
    """One elimination step.

    Normalise every row by its leading coefficient, subtract the first row
    from the others to zero out their first column, then recurse on the
    remaining sub-system until rows are reduced to ``[coeff, coeff, const]``.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> leading 1s.
    # (Rows are mutated in place; the shallow copy only guards the iteration.)
    duplicated_set = current_set.copy()
    for row_index, row in enumerate(duplicated_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel its first term.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # A row whose first term is already 0 is in the form we want: keep it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set (drop the solved first column).
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_set[0])
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Return the solution vector ``[x1, ..., xn]`` for *equations*.

    :raises IndexError: empty input, or rows not of length n+1.
    :raises ValueError: non-numeric entries, or no zero-free pivot row.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move one zero-free row to the front so the first pivot never divides by 0.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (fully reduced) row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the demo matrix used to be bound to a mangled name while the
    # calls below referenced `eq`, raising NameError on execution.
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
93
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def 
__snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''') raise
40
0
# NOTE(review): accelerate.utils helpers: compiled-module detection, unwrapping
# a model from DDP/DataParallel/DeepSpeed/compiled wrappers, wait_for_everyone,
# main-process-aware save (TPU `xm.save` vs `torch.save`), an env-var patching
# context manager, qualname lookup, recursive dict merge, and a localhost
# port-in-use probe.  Machine renaming left assignments on throwaway locals
# (`a :List[Any] = ...`) while the bodies read the original names (`options`,
# `forward`, `compiled_model`, `port`, ...), so several functions raise
# NameError if executed — restore the original binding names (compare upstream
# accelerate/utils/other.py) before use.  Only comments added; code unchanged.
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] ): """simple docstring""" if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(UpperCAmelCase_ , '''_dynamo''' ): return False return isinstance(UpperCAmelCase_ , torch._dynamo.eval_frame.OptimizedModule ) def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : bool = True ): """simple docstring""" a :List[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) a :List[str] = is_compiled_module(UpperCAmelCase_ ) if is_compiled: a :Tuple = model a :Optional[int] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): a :Any = model.module if not keep_fpaa_wrapper: a :Union[str, Any] = getattr(UpperCAmelCase_ , '''forward''' ) a :str = model.__dict__.pop('''_original_forward''' , UpperCAmelCase_ ) if original_forward is not None: while hasattr(UpperCAmelCase_ , '''__wrapped__''' ): a :Tuple = forward.__wrapped__ if forward == original_forward: break a :Union[str, Any] = forward if getattr(UpperCAmelCase_ , '''_converted_to_transformer_engine''' , UpperCAmelCase_ ): convert_model(UpperCAmelCase_ , to_transformer_engine=UpperCAmelCase_ ) if is_compiled: a :List[Any] = model a :int = compiled_model return model def __lowerCamelCase ( ): """simple docstring""" PartialState().wait_for_everyone() def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ): """simple docstring""" if
PartialState().distributed_type == DistributedType.TPU: xm.save(UpperCAmelCase_ , UpperCAmelCase_ ) elif PartialState().local_process_index == 0: torch.save(UpperCAmelCase_ , UpperCAmelCase_ ) @contextmanager def __lowerCamelCase ( **UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" for key, value in kwargs.items(): a :Union[str, Any] = str(UpperCAmelCase_ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __lowerCamelCase ( UpperCAmelCase_ : Dict ): """simple docstring""" if not hasattr(UpperCAmelCase_ , '''__qualname__''' ) and not hasattr(UpperCAmelCase_ , '''__name__''' ): a :List[str] = getattr(UpperCAmelCase_ , '''__class__''' , UpperCAmelCase_ ) if hasattr(UpperCAmelCase_ , '''__qualname__''' ): return obj.__qualname__ if hasattr(UpperCAmelCase_ , '''__name__''' ): return obj.__name__ return str(UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ): """simple docstring""" for key, value in source.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): a :Tuple = destination.setdefault(UpperCAmelCase_ , {} ) merge_dicts(UpperCAmelCase_ , UpperCAmelCase_ ) else: a :Optional[int] = value return destination def __lowerCamelCase ( UpperCAmelCase_ : int = None ): """simple docstring""" if port is None: a :Any = 2_9500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('''localhost''', port) ) == 0
94
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a 
: Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
0
# NOTE(review): RetriBertConfig (deprecated transformers model) — BERT-style
# hyper-parameters plus `share_encoders`/`projection_dim`.  As in the other
# mangled configs in this file, the __init__ body assigns every constructor
# argument to a throwaway local (`a__ : Tuple =vocab_size`) instead of
# `self.vocab_size = vocab_size`, so the config stores nothing — restore the
# `self.` targets (compare upstream configuration_retribert.py) before use.
# Only comments added; code is byte-identical.
from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase : Any = logging.get_logger(__name__) # TODO: upload to AWS UpperCAmelCase : Dict = { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json""" ), } class __lowerCAmelCase ( UpperCamelCase__): _lowercase : Optional[Any] = """retribert""" def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=True , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=0 , **lowerCAmelCase__ , ) -> str: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) a__ : Tuple =vocab_size a__ : Any =hidden_size a__ : Optional[Any] =num_hidden_layers a__ : Optional[int] =num_attention_heads a__ : List[Any] =hidden_act a__ : Dict =intermediate_size a__ : Optional[int] =hidden_dropout_prob a__ : Dict =attention_probs_dropout_prob a__ : Optional[int] =max_position_embeddings a__ : List[str] =type_vocab_size a__ : Union[str, Any] =initializer_range a__ : Optional[Any] =layer_norm_eps a__ : int =share_encoders a__ : List[str] =projection_dim
95
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""simple docstring""" import math def _snake_case ( lowercase__ ): if not isinstance(lowercase__ , lowercase__ ): _lowerCamelCase : Any = f'''Input value of [number={number}] must be an integer''' raise TypeError(lowercase__ ) if number < 1: _lowerCamelCase : Tuple = f'''Input value of [number={number}] must be > 0''' raise ValueError(lowercase__ ) elif number == 1: return 3 elif number == 2: return 5 else: _lowerCamelCase : Optional[Any] = int(math.log(number // 3 , 2 ) ) + 2 _lowerCamelCase : int = [3, 5] _lowerCamelCase : Dict = 2 _lowerCamelCase : Optional[int] = 3 for block in range(1 , lowercase__ ): for _ in range(lowercase__ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): lowercase__ = 0 try: lowercase__ = proth(number) except ValueError: print(F"ValueError: there is no {number}th Proth number") continue print(F"The {number}th Proth number: {value}")
96
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
0
"""Tests for the Stable Diffusion x4 upscale pipeline (fast CPU tests + slow GPU integration tests)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    StableDiffusionUpscalePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    """Fast, deterministic tests of StableDiffusionUpscalePipeline on tiny dummy models."""

    def tearDown(self):
        # Free GPU memory between tests so later tests do not run out of memory.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """A fixed-seed random image tensor of shape (1, 3, 32, 32)."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        return floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)

    @property
    def dummy_cond_unet_upscale(self):
        """A tiny conditional UNet configured like the x4-upscaler (7 input channels: latents + low-res image)."""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        """A tiny VAE matching the UNet's 4 latent channels."""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """A tiny CLIP text encoder."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def _build_pipeline(self):
        """Assemble a StableDiffusionUpscalePipeline from the dummy components."""
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )

    def _low_res_image(self):
        """Convert the dummy tensor into a 64x64 RGB PIL image to upscale."""
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        return Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        low_res_image = self._low_res_image()

        sd_pipe = self._build_pipeline().to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        # Same call again but with return_dict=False must yield the same image as a tuple.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        # The pipeline upscales by a factor of 4.
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        low_res_image = self._low_res_image()

        sd_pipe = self._build_pipeline().to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        # Batched prompts + batched images -> batched output.
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        # Single prompt with num_images_per_prompt=2 -> batched output as well.
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        low_res_image = self._low_res_image()

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the real stabilityai/stable-diffusion-x4-upscaler checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 output is noisier, hence the loose tolerance.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
97
"""Tokenization tests for LayoutLM (slow and fast tokenizers)."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Runs the common tokenizer test-suite against LayoutLM's WordPiece tokenizer."""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocabulary; ids are the list positions below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Build a slow tokenizer from the temporary vocab written in setUp."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Pair of (raw input, expected detokenized output) used by the common tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Intentionally empty: overrides a common-suite hook that does not apply here."""
        pass
40
0
"""Perplexity metric for the `datasets` library, computed with a causal LM from `transformers`."""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = '\\n\n'

_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'

_KWARGS_DESCRIPTION = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Computes per-text and mean perplexity under a pretrained causal language model."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'input_texts': datasets.Value('string'),
                }
            ),
            reference_urls=['https://huggingface.co/docs/transformers/perplexity'],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Tokenize `input_texts`, run them through `model_id` in batches and return perplexities.

        Raises AssertionError for invalid devices, missing special tokens, or too-short inputs.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='pt',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # Prepend the BOS token (and a matching "1" in the attention mask) to every row.
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Standard causal-LM shift: predict token t+1 from tokens <= t.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # NOTE(review): exp2 of a natural-log cross-entropy looks like base-2 perplexity of a
            # base-e loss — confirm whether torch.exp was intended here.
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
98
"""Convert an integer to its Python-style binary string representation."""


def lowercase(A_: int) -> str:
    """Return the binary representation of *A_* in the same form as ``bin()``.

    Examples: 0 -> "0b0", 5 -> "0b101", -5 -> "-0b101".

    Raises:
        TypeError: if *A_* is a float or a str (cannot be interpreted as an integer).
    """
    # Reject non-integers explicitly with the same messages CPython uses.
    if isinstance(A_, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(A_, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if A_ == 0:
        return "0b0"

    negative = False
    num = A_
    if num < 0:
        negative = True
        num = -num

    # Collect bits least-significant first, inserting at the front to keep order.
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
0
"""Integration tests for the BetterTransformer <-> Transformers round-trip conversion."""
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    """Checks conversion to/from BetterTransformer and save/load behavior."""

    def test_transform_and_reverse(self):
        """Converting, reversing, saving and reloading must preserve generation output."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()
        # After conversion at least one submodule must be a BetterTransformer layer.
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        # After reversing, no BetterTransformer layer may remain.
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving while still converted must raise; it works again after reversing."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
99
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""Tests for accelerate's weight-offloading utilities."""
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    """Tiny 3 -> 4 -> 5 model used as an offloading fixture."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        """Offloading a full state dict writes an index plus one .dat file per tensor."""
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        """A single weight round-trips through offload_weight/load_offloaded_weight per dtype."""
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        """OffloadedWeightsLoader merges in-memory and on-disk parts of a state dict."""
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        """Extraction matches exact submodule prefixes, not string prefixes (a.1 must not match a.10)."""
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
100
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyVaaInpaintPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


# Fast (CPU) tests for the Kandinsky 2.2 inpainting pipeline.
# NOTE(review): identifiers look machine-mangled -- the base `SCREAMING_SNAKE_CASE__`
# is presumably `PipelineTesterMixin` (imported above), every method is named `A__`
# (so later defs shadow earlier ones), class attributes all reuse `lowercase_`, and
# locals all reuse `lowercase` while bodies read the original names (`model`,
# `pipe`, `inputs`, ...). Verify against the upstream diffusers test file.
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    # pipeline under test, its expected call arguments, batch/callback params
    lowercase_ : List[Any] = KandinskyVaaInpaintPipeline
    lowercase_ : Union[str, Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    lowercase_ : Optional[int] = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    lowercase_ : Union[str, Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    lowercase_ : Tuple = False

    @property
    def A__ ( self):
        # hidden size used by the dummy text embedder / blocks below
        return 3_2

    @property
    def A__ ( self):
        return 3_2

    @property
    def A__ ( self):
        return self.time_input_dim

    @property
    def A__ ( self):
        return self.time_input_dim * 4

    @property
    def A__ ( self):
        return 1_0_0

    @property
    def A__ ( self):
        # Tiny UNet; 9 input channels (latents + masked-image latents + mask).
        torch.manual_seed(0)

        lowercase = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }

        lowercase = UNetaDConditionModel(**A__)
        return model

    @property
    def A__ ( self):
        # kwargs for the tiny VQ image decoder ("movq")
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def A__ ( self):
        torch.manual_seed(0)
        lowercase = VQModel(**self.dummy_movq_kwargs)
        return model

    def A__ ( self):
        # Assemble the pipeline components: dummy unet + DDIM scheduler + dummy movq.
        lowercase = self.dummy_unet
        lowercase = self.dummy_movq

        lowercase = DDIMScheduler(
            num_train_timesteps=1_0_0_0,
            beta_schedule='''linear''',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=A__,
            set_alpha_to_one=A__,
            steps_offset=1,
            prediction_type='''epsilon''',
            thresholding=A__,
        )

        lowercase = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def A__ ( self, A__, A__=0):
        # Deterministic dummy inputs: image/negative embeds, a 64x64 init image
        # upscaled to 256x256, and an all-ones mask with a single zeroed entry.
        lowercase = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(A__)).to(A__)
        lowercase = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            A__)
        # create init_image
        lowercase = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(A__)).to(A__)
        lowercase = image.cpu().permute(0, 2, 3, 1)[0]
        lowercase = Image.fromarray(np.uinta(A__)).convert('''RGB''').resize((2_5_6, 2_5_6))
        # create mask
        lowercase = np.ones((6_4, 6_4), dtype=np.floataa)
        lowercase = 0

        if str(A__).startswith('''mps'''):
            # MPS does not support device-local generators
            lowercase = torch.manual_seed(A__)
        else:
            lowercase = torch.Generator(device=A__).manual_seed(A__)
        lowercase = {
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 6_4,
            '''width''': 6_4,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs

    def A__ ( self):
        # Smoke test on CPU: run the pipeline twice (dict and tuple outputs) and
        # compare a 3x3 corner slice of the image against hard-coded values.
        lowercase = '''cpu'''

        lowercase = self.get_dummy_components()

        lowercase = self.pipeline_class(**A__)
        lowercase = pipe.to(A__)

        pipe.set_progress_bar_config(disable=A__)

        lowercase = pipe(**self.get_dummy_inputs(A__))
        lowercase = output.images

        lowercase = pipe(
            **self.get_dummy_inputs(A__),
            return_dict=A__,
        )[0]

        lowercase = image[0, -3:, -3:, -1]
        lowercase = image_from_tuple[0, -3:, -3:, -1]

        print(f'image.shape {image.shape}')

        assert image.shape == (1, 6_4, 6_4, 3)

        lowercase = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def A__ ( self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)


@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def A__ ( self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self):
        # End-to-end GPU integration test: prior pipeline produces the image
        # embeddings, inpaint pipeline paints "a hat" onto the cat reference image,
        # and the result is compared against a stored .npy reference.
        lowercase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''')

        lowercase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/cat.png''')
        lowercase = np.ones((7_6_8, 7_6_8), dtype=np.floataa)
        lowercase = 0

        lowercase = '''a hat'''

        lowercase = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa)
        pipe_prior.to(A__)

        lowercase = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''', torch_dtype=torch.floataa)
        lowercase = pipeline.to(A__)
        pipeline.set_progress_bar_config(disable=A__)

        lowercase = torch.Generator(device='''cpu''').manual_seed(0)

        lowercase, lowercase = pipe_prior(
            A__,
            generator=A__,
            num_inference_steps=5,
            negative_prompt='''''',
        ).to_tuple()

        lowercase = pipeline(
            image=A__,
            mask_image=A__,
            image_embeds=A__,
            negative_image_embeds=A__,
            generator=A__,
            num_inference_steps=1_0_0,
            height=7_6_8,
            width=7_6_8,
            output_type='''np''',
        )

        lowercase = output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)

        assert_mean_pixel_difference(A__, A__)
101
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Union[str, Any] ) ->Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = AlbertConfig.from_json_file(_snake_case ) print(f"""Building PyTorch model from configuration: {config}""" ) __snake_case : Tuple = AlbertForPreTraining(_snake_case ) # Load weights from tf checkpoint load_tf_weights_in_albert(_snake_case , _snake_case , _snake_case ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , _snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--albert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained ALBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
102
"""Unit tests for the `check_dummies` repo utility.

Fixes over the mangled original: module-level path variables and the
`check_dummies.PATH_TO_DIFFUSERS` override were all assigned to `__lowercase`
(so `git_repo_path` was undefined -> NameError), every assertion read the
undefined `__UpperCAmelCase`, and all four test methods shared the name
`__snake_case` (three shadowed, none discoverable by unittest). Restored.
"""
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class _A(unittest.TestCase):
    """Checks on backend detection and dummy-object generation."""

    def test_find_backend(self):
        # find_backend() extracts the backend name from an availability-check line.
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
40
0
import numpy as np

from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin


# Shared test mixin for sequence feature extractors (padding/truncation/attention mask).
# NOTE(review): identifiers look machine-mangled -- the base `UpperCamelCase_` is
# presumably `FeatureExtractionSavingTestMixin` (imported above), both class
# attributes reuse `_a`, all methods are named `UpperCAmelCase__` (later defs
# shadow earlier ones), and locals are all assigned to `lowerCAmelCase_` while
# the bodies read the original names (`feat_extract`, `speech_inputs`, ...).
# Several helper defs even repeat a parameter name. Code left byte-identical;
# verify against the upstream transformers test file before relying on it.
class __snake_case ( UpperCamelCase_ ):
    # to overwrite at feature extractactor specific tests
    _a = None
    _a = None

    @property
    def UpperCAmelCase__ ( self : int):
        # dict of kwargs used to build the feature extractor under test
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def UpperCAmelCase__ ( self : Tuple):
        # every sequence feature extractor must expose these three attributes
        lowerCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(A_, '''feature_size'''))
        self.assertTrue(hasattr(A_, '''sampling_rate'''))
        self.assertTrue(hasattr(A_, '''padding_value'''))

    def UpperCAmelCase__ ( self : Dict):
        # BatchFeature keeps raw lists as-is and converts to numpy on request
        lowerCAmelCase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
        lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : int = feat_extract.model_input_names[0]

        lowerCAmelCase_ : str = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(A_) == len(A_) for x, y in zip(A_, processed_features[input_name])))

        lowerCAmelCase_ : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_)
        lowerCAmelCase_ : Union[str, Any] = BatchFeature({input_name: speech_inputs}, tensor_type='''np''')

        lowerCAmelCase_ : Optional[Any] = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            lowerCAmelCase_ : Union[str, Any] = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    @require_torch
    def UpperCAmelCase__ ( self : Any):
        # same as above but converting to a PyTorch tensor
        lowerCAmelCase_ : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_)
        lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : List[Any] = feat_extract.model_input_names[0]

        lowerCAmelCase_ : int = BatchFeature({input_name: speech_inputs}, tensor_type='''pt''')

        lowerCAmelCase_ : List[str] = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            lowerCAmelCase_ : Any = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    @require_tf
    def UpperCAmelCase__ ( self : Optional[int]):
        # same as above but converting to a TensorFlow tensor
        lowerCAmelCase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_)
        lowerCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : Union[str, Any] = feat_extract.model_input_names[0]

        lowerCAmelCase_ : Optional[Any] = BatchFeature({input_name: speech_inputs}, tensor_type='''tf''')

        lowerCAmelCase_ : Optional[Any] = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            lowerCAmelCase_ : List[str] = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    def UpperCAmelCase__ ( self : Optional[Any], A_ : Optional[Any]=False):
        # shared driver for the padding tests (list inputs and numpy inputs)
        def _inputs_have_equal_length(A_ : Optional[Any]):
            lowerCAmelCase_ : Optional[Any] = len(input[0])
            for input_slice in input[1:]:
                if len(A_) != length:
                    return False
            return True

        def _inputs_are_equal(A_ : Union[str, Any], A_ : str):
            if len(A_) != len(A_):
                return False

            for input_slice_a, input_slice_a in zip(A_, A_):
                if not np.allclose(np.asarray(A_), np.asarray(A_), atol=1e-3):
                    return False
            return True

        lowerCAmelCase_ : int = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_)
        lowerCAmelCase_ : List[Any] = feat_extract.model_input_names[0]

        lowerCAmelCase_ : List[str] = BatchFeature({input_name: speech_inputs})

        lowerCAmelCase_ : Optional[int] = self.feat_extract_tester.seq_length_diff
        lowerCAmelCase_ : int = self.feat_extract_tester.max_seq_length + pad_diff
        lowerCAmelCase_ : List[str] = self.feat_extract_tester.min_seq_length
        lowerCAmelCase_ : int = self.feat_extract_tester.batch_size
        lowerCAmelCase_ : Tuple = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        lowerCAmelCase_ : Dict = feat_extract.pad(A_, padding=A_)
        lowerCAmelCase_ : Dict = input_a[input_name]

        lowerCAmelCase_ : Optional[int] = feat_extract.pad(A_, padding='''longest''')
        lowerCAmelCase_ : List[str] = input_a[input_name]

        lowerCAmelCase_ : Optional[Any] = feat_extract.pad(A_, padding='''max_length''', max_length=len(speech_inputs[-1]))
        lowerCAmelCase_ : Any = input_a[input_name]

        lowerCAmelCase_ : Any = feat_extract.pad(A_, padding='''longest''', return_tensors='''np''')
        lowerCAmelCase_ : Dict = input_a[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(A_):
            feat_extract.pad(A_, padding='''max_length''')[input_name]

        lowerCAmelCase_ : Dict = feat_extract.pad(
            A_, padding='''max_length''', max_length=A_, return_tensors='''np''')
        lowerCAmelCase_ : str = input_a[input_name]

        self.assertFalse(_inputs_have_equal_length(A_))
        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertTrue(_inputs_are_equal(A_, A_))
        self.assertTrue(len(input_a[0]) == pad_min_length)
        self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        lowerCAmelCase_ : Optional[int] = feat_extract.pad(A_, pad_to_multiple_of=1_0)
        lowerCAmelCase_ : List[Any] = input_a[input_name]

        lowerCAmelCase_ : Tuple = feat_extract.pad(A_, padding='''longest''', pad_to_multiple_of=1_0)
        lowerCAmelCase_ : Optional[int] = input_a[input_name]

        lowerCAmelCase_ : Tuple = feat_extract.pad(
            A_, padding='''max_length''', pad_to_multiple_of=1_0, max_length=A_)
        lowerCAmelCase_ : Optional[Any] = input_a[input_name]

        lowerCAmelCase_ : Tuple = feat_extract.pad(
            A_, padding='''max_length''', pad_to_multiple_of=1_0, max_length=A_, return_tensors='''np''',
        )
        lowerCAmelCase_ : Union[str, Any] = input_a[input_name]

        self.assertTrue(all(len(A_) % 1_0 == 0 for x in input_a))
        self.assertTrue(_inputs_are_equal(A_, A_))

        lowerCAmelCase_ : str = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
        self.assertTrue(all(len(A_) == expected_mult_pad_length for x in input_a))
        self.assertEqual(input_a.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size)

        # Check padding value is correct
        lowerCAmelCase_ : List[Any] = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3)

    def UpperCAmelCase__ ( self : List[Any], A_ : List[Any]=False):
        # shared driver for the truncation tests (list inputs and numpy inputs)
        def _inputs_have_equal_length(A_ : Optional[Any]):
            lowerCAmelCase_ : Optional[Any] = len(input[0])
            for input_slice in input[1:]:
                if len(A_) != length:
                    return False
            return True

        def _inputs_are_equal(A_ : List[str], A_ : Optional[Any]):
            if len(A_) != len(A_):
                return False

            for input_slice_a, input_slice_a in zip(A_, A_):
                if not np.allclose(np.asarray(A_), np.asarray(A_), atol=1e-3):
                    return False
            return True

        lowerCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_)
        lowerCAmelCase_ : Any = feat_extract.model_input_names[0]

        lowerCAmelCase_ : List[Any] = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        lowerCAmelCase_ : int = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[0]), truncation=A_)
        lowerCAmelCase_ : int = input_a[input_name]

        lowerCAmelCase_ : Tuple = feat_extract.pad(A_, padding='''max_length''', max_length=len(speech_inputs[0]))
        lowerCAmelCase_ : str = input_a[input_name]

        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertFalse(_inputs_have_equal_length(A_))

        # truncate to smallest with np
        lowerCAmelCase_ : Union[str, Any] = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[0]), return_tensors='''np''', truncation=A_,
        )
        lowerCAmelCase_ : Tuple = input_a[input_name]

        lowerCAmelCase_ : Any = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[0]), return_tensors='''np''')
        lowerCAmelCase_ : Optional[Any] = input_a[input_name]

        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(A_))

        # truncate to middle
        lowerCAmelCase_ : List[Any] = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[1]), truncation=A_, return_tensors='''np''',
        )
        lowerCAmelCase_ : Dict = input_a[input_name]

        lowerCAmelCase_ : int = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[1]), truncation=A_)
        lowerCAmelCase_ : int = input_a[input_name]

        lowerCAmelCase_ : Tuple = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[1]), return_tensors='''np''')
        lowerCAmelCase_ : int = input_a[input_name]

        self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertTrue(_inputs_are_equal(A_, A_))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(A_))
        self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_):
            feat_extract.pad(A_, truncation=A_)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_):
            feat_extract.pad(A_, padding='''longest''', truncation=A_)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_):
            feat_extract.pad(A_, padding='''longest''', truncation=A_)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(A_):
            feat_extract.pad(A_, padding='''max_length''', truncation=A_)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        lowerCAmelCase_ : Dict = 1_2
        lowerCAmelCase_ : Dict = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[0]), pad_to_multiple_of=A_, truncation=A_,
        )
        lowerCAmelCase_ : Any = input_a[input_name]

        lowerCAmelCase_ : List[str] = feat_extract.pad(
            A_, padding='''max_length''', max_length=len(speech_inputs[0]), pad_to_multiple_of=A_,
        )
        lowerCAmelCase_ : Dict = input_a[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        lowerCAmelCase_ : Optional[int] = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            lowerCAmelCase_ : Any = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_a[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(A_))
        self.assertFalse(_inputs_have_equal_length(A_))

    def UpperCAmelCase__ ( self : List[Any]):
        self._check_padding(numpify=A_)

    def UpperCAmelCase__ ( self : Tuple):
        self._check_padding(numpify=A_)

    def UpperCAmelCase__ ( self : Dict):
        self._check_truncation(numpify=A_)

    def UpperCAmelCase__ ( self : Dict):
        self._check_truncation(numpify=A_)

    @require_torch
    def UpperCAmelCase__ ( self : int):
        # numpy and torch padding must agree (compared via summed values)
        lowerCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : int = self.feat_extract_tester.prepare_inputs_for_common()
        lowerCAmelCase_ : Dict = feat_extract.model_input_names[0]

        lowerCAmelCase_ : List[str] = BatchFeature({input_name: speech_inputs})

        lowerCAmelCase_ : Union[str, Any] = feat_extract.pad(A_, padding='''longest''', return_tensors='''np''')[input_name]
        lowerCAmelCase_ : str = feat_extract.pad(A_, padding='''longest''', return_tensors='''pt''')[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)

    @require_tf
    def UpperCAmelCase__ ( self : Tuple):
        # numpy and tensorflow padding must agree (compared via summed values)
        lowerCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
        lowerCAmelCase_ : List[str] = self.feat_extract_tester.prepare_inputs_for_common()
        lowerCAmelCase_ : Optional[int] = feat_extract.model_input_names[0]

        lowerCAmelCase_ : List[Any] = BatchFeature({input_name: speech_inputs})

        lowerCAmelCase_ : Optional[Any] = feat_extract.pad(A_, padding='''longest''', return_tensors='''np''')[input_name]
        lowerCAmelCase_ : List[str] = feat_extract.pad(A_, padding='''longest''', return_tensors='''tf''')[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)

    def UpperCAmelCase__ ( self : List[Any]):
        # attention_mask shape/sums must reflect the unpadded input lengths
        lowerCAmelCase_ : Union[str, Any] = self.feat_extract_dict
        lowerCAmelCase_ : Optional[Any] = True
        lowerCAmelCase_ : str = self.feature_extraction_class(**A_)
        lowerCAmelCase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        lowerCAmelCase_ : List[str] = [len(A_) for x in speech_inputs]
        lowerCAmelCase_ : Tuple = feat_extract.model_input_names[0]

        lowerCAmelCase_ : int = BatchFeature({input_name: speech_inputs})

        lowerCAmelCase_ : int = feat_extract.pad(A_, padding='''longest''', return_tensors='''np''')
        self.assertIn('''attention_mask''', A_)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), A_)

    def UpperCAmelCase__ ( self : List[str]):
        # attention_mask must also be truncated together with the inputs
        lowerCAmelCase_ : int = self.feat_extract_dict
        lowerCAmelCase_ : Optional[int] = True
        lowerCAmelCase_ : str = self.feature_extraction_class(**A_)
        lowerCAmelCase_ : str = self.feat_extract_tester.prepare_inputs_for_common()
        lowerCAmelCase_ : str = [len(A_) for x in speech_inputs]
        lowerCAmelCase_ : Union[str, Any] = feat_extract.model_input_names[0]

        lowerCAmelCase_ : Tuple = BatchFeature({input_name: speech_inputs})
        lowerCAmelCase_ : Any = min(A_)

        lowerCAmelCase_ : List[str] = feat_extract.pad(
            A_, padding='''max_length''', max_length=A_, truncation=A_, return_tensors='''np''')
        self.assertIn('''attention_mask''', A_)

        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
103
"""simple docstring"""

# Pinned dependency table: maps a package name to its pip requirement
# specifier. NOTE(review): upstream this table is typically generated into
# `dependency_versions_table.py` by `make deps_table_update`; edit the source
# of truth, not this file, if they diverge.
__lowercase = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
40
0
'''simple docstring''' import re def _A ( A__ ): """simple docstring""" __lowercase = re.compile( R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' ) return bool(re.search(A__ , A__ ) ) if __name__ == "__main__": lowerCAmelCase__ = '''0094702343221''' print(is_sri_lankan_phone_number(phone))
104
"""Lazy-import module definition for the BERT model family.

Fix: every assignment in the mangled original targeted `__lowercase`, so the
final `_LazyModule(..., _import_structure, ...)` call referenced an undefined
`_import_structure` (NameError) and every optional-backend branch discarded
its export list. Restored the `_import_structure` dict/keys and the standard
`sys.modules[__name__]` replacement.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule to the public names it provides; optional backends are
# added below only when their dependencies are importable.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers; mirrors _import_structure exactly.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=a__ ): lowerCamelCase : Tuple =["""speech"""] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]: requires_backends(self , ["speech"] ) class __UpperCamelCase ( metaclass=a__ ): lowerCamelCase : Optional[Any] =["""speech"""] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str: requires_backends(self , ["speech"] )
105
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : Any = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "xlm-roberta" def __init__( self : Tuple ,lowercase_ : Optional[int]=3_0_5_2_2 ,lowercase_ : List[Any]=7_6_8 ,lowercase_ : Optional[Any]=1_2 ,lowercase_ : Dict=1_2 ,lowercase_ : List[str]=3_0_7_2 ,lowercase_ : Optional[int]="gelu" ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : List[Any]=5_1_2 ,lowercase_ : List[str]=2 ,lowercase_ : List[str]=0.02 ,lowercase_ : List[str]=1E-12 ,lowercase_ : List[str]=1 ,lowercase_ : Optional[Any]=0 ,lowercase_ : Any=2 ,lowercase_ : Tuple="absolute" ,lowercase_ : Optional[int]=True ,lowercase_ : Optional[int]=None ,**lowercase_ : Tuple ,): super().__init__(pad_token_id=lowercase_ ,bos_token_id=lowercase_ ,eos_token_id=lowercase_ ,**lowercase_ ) lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : str = hidden_size lowerCAmelCase__ : int = 
num_hidden_layers lowerCAmelCase__ : Union[str, Any] = num_attention_heads lowerCAmelCase__ : Dict = hidden_act lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : int = hidden_dropout_prob lowerCAmelCase__ : Dict = attention_probs_dropout_prob lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : List[Any] = type_vocab_size lowerCAmelCase__ : int = initializer_range lowerCAmelCase__ : Union[str, Any] = layer_norm_eps lowerCAmelCase__ : Tuple = position_embedding_type lowerCAmelCase__ : Any = use_cache lowerCAmelCase__ : int = classifier_dropout class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" @property def __lowerCAmelCase ( self : Any ): if self.task == "multiple-choice": lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase__ : List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
106
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if *n* uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Largest 1-9 pandigital concatenated product (Project Euler 38).

    base * 100002 concatenates a 4-digit base with 2*base (5 digits);
    base * 1002003 concatenates a 3-digit base with 2*base and 3*base.
    Bases are scanned from the top so the first hit is the maximum.

    BUG FIX: both helpers were previously defined under the same throwaway
    name, so the call sites (`is_9_pandigital`, `solution`) were undefined.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"{solution() = }")
107
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
108
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
"""simple docstring""" def _snake_case ( UpperCamelCase : int ): UpperCAmelCase : Union[str, Any] = [0] * len(UpperCamelCase ) UpperCAmelCase : List[Any] = [] UpperCAmelCase : Optional[int] = [1] * len(UpperCamelCase ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(UpperCamelCase ) ): if indegree[i] == 0: queue.append(UpperCamelCase ) while queue: UpperCAmelCase : Optional[Any] = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: UpperCAmelCase : Optional[int] = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(UpperCamelCase ) print(max(UpperCamelCase ) ) # Adjacency list of Graph A: Tuple = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
109
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count n < *limit* with exactly ten solutions to
    x**2 - y**2 - z**2 = n, where x, y, z are positive integers in
    arithmetic progression.

    With middle term a (= first_term) and common difference d the equation
    becomes n = a * (4d - a), so n iterates over multiples of each a and
    d = (a + n // a) / 4 must be a positive integer with a > d and a < 4d.

    BUG FIX: the previous version referenced an undefined ``limit`` and its
    inner ``range`` used the same variable for start/stop/step (an empty
    range), so nothing was ever counted; float division replaced by ``//``.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        # n runs over the multiples of first_term below limit, so the
        # division below is exact.
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n // first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference //= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):  # since x, y, z are positive integers: z > 0 and a > d, also 4d > a
                frequency[n] += 1
    return sum(1 for x in frequency[1:limit] if x == 10)


if __name__ == "__main__":
    print(f"""{solution() = }""")
110
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup snake_case_ = { """User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36""" """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""" } def _lowerCAmelCase ( lowercase_ = "dhaka" , lowercase_ = 5 ): UpperCAmelCase = min(A_ , 50 ) # Prevent abuse! UpperCAmelCase = { "q": query, "tbm": "isch", "hl": "en", "ijn": "0", } UpperCAmelCase = requests.get('https://www.google.com/search' , params=A_ , headers=A_ ) UpperCAmelCase = BeautifulSoup(html.text , 'html.parser' ) UpperCAmelCase = "".join( re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) UpperCAmelCase = json.dumps(A_ ) UpperCAmelCase = json.loads(A_ ) UpperCAmelCase = re.findall( R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , A_ , ) if not matched_google_image_data: return 0 UpperCAmelCase = re.sub( R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(A_ ) , ) UpperCAmelCase = re.findall( R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , A_ , ) for index, fixed_full_res_image in enumerate(A_ ): if index >= max_images: return index UpperCAmelCase = bytes(A_ , 'ascii' ).decode( 'unicode-escape' ) UpperCAmelCase = bytes(A_ , 'ascii' ).decode( 'unicode-escape' ) UpperCAmelCase = urllib.request.build_opener() UpperCAmelCase = [ ( "User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582", ) ] urllib.request.install_opener(A_ ) UpperCAmelCase = F"""query_{query.replace(' ' , '_' )}""" if not os.path.exists(A_ ): os.makedirs(A_ ) urllib.request.urlretrieve( # noqa: S310 A_ , F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: snake_case_ = download_images_from_google_query(sys.argv[1]) print(f'''{image_count} images were downloaded to disk.''') 
except IndexError: print("""Please provide a search term.""") raise
78
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() 
model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, 
-8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml A : Any = logging.get_logger(__name__) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' def run_func(_UpperCamelCase ): @wraps(A_ ) def run_in_eager_mode(*_UpperCamelCase , **_UpperCamelCase ): return func(*A_ , **A_ ) @wraps(A_ ) @tf.function(experimental_compile=A_ ) def run_in_graph_mode(*_UpperCamelCase , **_UpperCamelCase ): return func(*A_ , **A_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." 
) return run_in_eager_mode else: return run_in_graph_mode return run_func def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = random.Random() __lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(A_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class _UpperCamelCase ( _a ): '''simple docstring''' __UpperCAmelCase : TensorFlowBenchmarkArguments __UpperCAmelCase : PretrainedConfig __UpperCAmelCase : str ="TensorFlow" @property def snake_case ( self ): return tf.__version__ def snake_case ( self , __a , __a , __a ): # initialize GPU on separate process __lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __lowerCAmelCase = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_speed(_inference ) def snake_case ( self , __a , __a , __a ): __lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __lowerCAmelCase = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_speed(_train ) def snake_case ( self , __a , __a , __a ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase ) __lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." 
) __lowerCAmelCase = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_memory(_inference ) def snake_case ( self , __a , __a , __a ): if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase ) __lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("A device strategy has to be initialized before using TensorFlow." ) __lowerCAmelCase = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return self._measure_memory(_train ) def snake_case ( self , __a , __a , __a ): __lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("Mixed precision is currently not supported." ) __lowerCAmelCase = ( hasattr(__UpperCAmelCase , "architectures" ) and isinstance(config.architectures , __UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __lowerCAmelCase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model __lowerCAmelCase = __import__("transformers" , fromlist=[model_class] ) __lowerCAmelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ) __lowerCAmelCase = model_cls(__UpperCAmelCase ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." 
) else: __lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](__UpperCAmelCase ) # encoder-decoder has vocab size saved differently __lowerCAmelCase = config.vocab_size if hasattr(__UpperCAmelCase , "vocab_size" ) else config.encoder.vocab_size __lowerCAmelCase = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , training=__UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(__UpperCAmelCase , training=__UpperCAmelCase ) __lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def snake_case ( self , __a , __a , __a ): __lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." ) if self.args.fpaa: raise NotImplementedError("Mixed precision is currently not supported." ) __lowerCAmelCase = ( hasattr(__UpperCAmelCase , "architectures" ) and isinstance(config.architectures , __UpperCAmelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __lowerCAmelCase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model __lowerCAmelCase = __import__("transformers" , fromlist=[model_class] ) __lowerCAmelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ) __lowerCAmelCase = model_cls(__UpperCAmelCase ) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to" " set `--only_pretrain_model` or `args.only_pretrain_model=True`." 
) else: __lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCAmelCase ) # encoder-decoder has vocab size saved differently __lowerCAmelCase = config.vocab_size if hasattr(__UpperCAmelCase , "vocab_size" ) else config.encoder.vocab_size __lowerCAmelCase = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): __lowerCAmelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0] __lowerCAmelCase = tf.gradients(__UpperCAmelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): __lowerCAmelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0] __lowerCAmelCase = tf.gradients(__UpperCAmelCase , model.trainable_variables ) return gradients __lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def snake_case ( self , __a ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" ) timeit.repeat(__UpperCAmelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average __lowerCAmelCase = timeit.repeat( __UpperCAmelCase , repeat=self.args.repeat , number=10 , ) return min(__UpperCAmelCase ) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn\'t fit on GPU. {e}" ) def snake_case ( self , __a ): logger.info( "Note that TensorFlow allocates more memory than " "it might need to speed up computation. 
" "The memory reported here corresponds to the memory " "reported by `nvidia-smi`, which can vary depending " "on total available memory on the GPU that is used." ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory" " consumption line by line." ) __lowerCAmelCase = start_memory_tracing("transformers" ) if self.args.is_tpu: # tpu raise NotImplementedError( "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking" " with `args.memory=False`" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) __lowerCAmelCase = "N/A" else: logger.info( "Measuring total GPU usage on GPU device. Make sure to not have additional processes" " running on the same GPU." ) # init nvml nvml.nvmlInit() func() __lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) __lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__UpperCAmelCase ) __lowerCAmelCase = meminfo.used __lowerCAmelCase = Memory(__UpperCAmelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( "When enabling line by line tracing, the max peak memory for CPU is inaccurate in" " TensorFlow." ) __lowerCAmelCase = None else: __lowerCAmelCase = measure_peak_memory_cpu(__UpperCAmelCase ) __lowerCAmelCase = Memory(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else memory_bytes if self.args.trace_memory_line_by_line: __lowerCAmelCase = stop_memory_tracing(__UpperCAmelCase ) if memory is None: __lowerCAmelCase = summary.total else: __lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"Doesn\'t fit on GPU. {e}" ) return "N/A", None
57
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ): __magic_name__ : Union[str, Any] = parent __magic_name__ : Optional[int] = batch_size __magic_name__ : str = image_size __magic_name__ : List[Any] = num_channels __magic_name__ : List[str] = num_stages __magic_name__ : Optional[int] = hidden_sizes __magic_name__ : List[Any] = depths __magic_name__ : int = is_training __magic_name__ : Optional[int] = use_labels __magic_name__ : str = intermediate_size __magic_name__ : str = hidden_act __magic_name__ : int = num_labels __magic_name__ : Tuple = initializer_range __magic_name__ : Optional[Any] = out_features __magic_name__ : List[Any] = out_indices __magic_name__ : Union[str, Any] = scope def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ : str = None if 
self.use_labels: __magic_name__ : Any = ids_tensor([self.batch_size] , self.num_labels ) __magic_name__ : Optional[Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ): __magic_name__ : int = ConvNextVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __magic_name__ : List[str] = model(__UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ): __magic_name__ : Optional[Any] = ConvNextVaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __magic_name__ : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ): __magic_name__ : Optional[int] = ConvNextVaBackbone(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __magic_name__ : List[Any] = model(__UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __magic_name__ : Any = None __magic_name__ : 
Optional[int] = ConvNextVaBackbone(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __magic_name__ : Optional[Any] = model(__UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : str = self.prepare_config_and_inputs() __magic_name__ : Any = config_and_inputs __magic_name__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : int = self.prepare_config_and_inputs() __magic_name__ : Optional[int] = config_and_inputs __magic_name__ : Optional[int] = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _snake_case ( _a , _a , unittest.TestCase ): UpperCamelCase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) UpperCamelCase__ = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Tuple = ConvNextVaModelTester(self ) __magic_name__ : str = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self ): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self ): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def SCREAMING_SNAKE_CASE ( self ): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def SCREAMING_SNAKE_CASE ( self ): pass def SCREAMING_SNAKE_CASE ( self ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_with_labels() __magic_name__ : Tuple = True if model_class.__name__ in [ *get_values(__UpperCAmelCase ), *get_values(__UpperCAmelCase ), ]: continue __magic_name__ : int = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() __magic_name__ : Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __magic_name__ : Optional[int] = model(**__UpperCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels() __magic_name__ : Optional[int] = False __magic_name__ : Optional[Any] = True if ( model_class.__name__ in [*get_values(__UpperCAmelCase ), *get_values(__UpperCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue __magic_name__ : Tuple = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() __magic_name__ : Union[str, Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __magic_name__ : Optional[int] = model(**__UpperCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Tuple = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Any = model_class(__UpperCAmelCase ) __magic_name__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ : Any = [*signature.parameters.keys()] __magic_name__ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self ): def check_hidden_states_output(_a , _a , _a ): __magic_name__ : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): __magic_name__ : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __magic_name__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __magic_name__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : List[Any] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ : Dict = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : Tuple = ConvNextVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCAmelCase_ ( ) -> List[str]: '''simple docstring''' __magic_name__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self ): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Optional[int] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__UpperCAmelCase ) __magic_name__ : Any = self.default_image_processor __magic_name__ : Union[str, Any] = prepare_img() __magic_name__ : List[str] = preprocessor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __magic_name__ : Any = model(**__UpperCAmelCase ) # verify the logits __magic_name__ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __magic_name__ : Union[str, Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
281
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
0
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using deterministic 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1, so testing divisors 5, 7, 11, 13, ...
    # up to sqrt(number) is sufficient.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the *nth* prime number (Project Euler problem 7).

    Raises StopIteration only if nth < 1, since the generator is infinite.
    """
    return next(itertools.islice(prime_generator(), nth - 1, None))


if __name__ == "__main__":
    print(f"{solution() = }")
312
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
0
def solution(pence: int = 200) -> int:
    """Count the ways *pence* can be made from British coins (Project Euler 31).

    Classic unbounded-knapsack DP: process one coin denomination at a time so
    each combination is counted exactly once regardless of order.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
76
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def 
__snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''') raise
40
0
def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 below *n* (Project Euler 1).

    The original's `elif a % 15 == 0: result -= a` branch was dead code: any
    multiple of 15 is already a multiple of 3, so that branch never executed.
    """
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result


if __name__ == "__main__":
    print(f"""{solution() = }""")
336
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a 
: Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: lowercase__ = None lowercase__ = logging.get_logger(__name__) lowercase__ = """▁""" lowercase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowercase__ = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } lowercase__ = { """google/pegasus-xsum""": 512, } class __lowerCamelCase ( _a ): '''simple docstring''' a_ : Optional[int] = VOCAB_FILES_NAMES a_ : int = PRETRAINED_VOCAB_FILES_MAP a_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : Optional[Any] = PegasusTokenizer a_ : str = ["""input_ids""", """attention_mask"""] def __init__( self : Dict , a_ : Dict=None , a_ : Optional[int]=None , a_ : List[Any]="<pad>" , a_ : Optional[int]="</s>" , a_ : Optional[int]="<unk>" , a_ : str="<mask_2>" , a_ : List[str]="<mask_1>" , a_ : str=None , a_ : Any=1_03 , **a_ : Union[str, Any] , ): lowerCAmelCase_ : List[Any] = offset if additional_special_tokens is not None: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError( f'''additional_special_tokens should be of type {type(__UpperCAmelCase )}, but is''' f''' {type(__UpperCAmelCase )}''' ) lowerCAmelCase_ : Dict = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in 
range(len(__UpperCAmelCase ) , self.offset - 1 ) ] if len(set(__UpperCAmelCase ) ) != len(__UpperCAmelCase ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' ) lowerCAmelCase_ : int = additional_special_tokens_extended else: lowerCAmelCase_ : List[str] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , pad_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , mask_token_sent=__UpperCAmelCase , offset=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase_ : str = vocab_file lowerCAmelCase_ : str = False if not self.vocab_file else True def lowerCamelCase ( self : Tuple , a_ : int ): lowerCAmelCase_ : int = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowerCamelCase ( self : Any , a_ : List , a_ : Optional[List] = None , a_ : bool = False ): if already_has_special_tokens: return self._special_token_mask(__UpperCAmelCase ) elif token_ids_a is None: return self._special_token_mask(__UpperCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowerCamelCase ( self : Optional[int] , a_ : Tuple , a_ : Optional[int]=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't 
expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowerCamelCase ( self : Optional[int] , a_ : str , a_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase_ : Dict = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
241
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
0
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a .safetensors LoRA checkpoint into a StableDiffusionPipeline.

    For each lora_up/lora_down pair, adds `alpha * up @ down` to the matching
    base weight in the UNet or text encoder and returns the patched pipeline.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by greedily re-joining underscore-split name parts
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv layers store LoRA factors as (out, in, 1, 1) — squeeze, matmul, restore
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
325
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
0
# Dependency version table: maps a bare package name to the full pip
# requirement specifier (name + version pin) used when building extras.
# NOTE(review): the triple-quoted '''...''' keys/values are unusual but valid
# Python string literals; kept byte-identical.
__lowercase = {
    '''Pillow''': '''Pillow<10.0.0''',
    '''accelerate''': '''accelerate>=0.20.3''',
    '''av''': '''av==9.2.0''',
    '''beautifulsoup4''': '''beautifulsoup4''',
    '''black''': '''black~=23.1''',
    '''codecarbon''': '''codecarbon==1.2.0''',
    '''cookiecutter''': '''cookiecutter==1.7.3''',
    '''dataclasses''': '''dataclasses''',
    '''datasets''': '''datasets!=2.5.0''',
    '''decord''': '''decord==0.6.0''',
    '''deepspeed''': '''deepspeed>=0.9.3''',
    '''diffusers''': '''diffusers''',
    '''dill''': '''dill<0.3.5''',
    '''evaluate''': '''evaluate>=0.2.0''',
    '''fairscale''': '''fairscale>0.3''',
    '''faiss-cpu''': '''faiss-cpu''',
    '''fastapi''': '''fastapi''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1,<=0.7.0''',
    '''ftfy''': '''ftfy''',
    '''fugashi''': '''fugashi>=1.0''',
    '''GitPython''': '''GitPython<3.1.19''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''ipadic''': '''ipadic>=1.0.0,<2.0''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
    '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
    '''jieba''': '''jieba''',
    '''kenlm''': '''kenlm''',
    '''keras-nlp''': '''keras-nlp>=0.3.1''',
    '''librosa''': '''librosa''',
    '''nltk''': '''nltk''',
    '''natten''': '''natten>=0.14.6''',
    '''numpy''': '''numpy>=1.17''',
    '''onnxconverter-common''': '''onnxconverter-common''',
    '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
    '''onnxruntime''': '''onnxruntime>=1.4.0''',
    '''opencv-python''': '''opencv-python''',
    '''optuna''': '''optuna''',
    '''optax''': '''optax>=0.0.8,<=0.1.4''',
    '''packaging''': '''packaging>=20.0''',
    '''parameterized''': '''parameterized''',
    '''phonemizer''': '''phonemizer''',
    '''protobuf''': '''protobuf''',
    '''psutil''': '''psutil''',
    '''pyyaml''': '''pyyaml>=5.1''',
    '''pydantic''': '''pydantic<2''',
    '''pytest''': '''pytest>=7.2.0''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''python''': '''python>=3.8.0''',
    '''ray[tune]''': '''ray[tune]''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
    '''rjieba''': '''rjieba''',
    '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
    '''ruff''': '''ruff>=0.0.241,<=0.0.259''',
    '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
    '''sacremoses''': '''sacremoses''',
    '''safetensors''': '''safetensors>=0.3.1''',
    '''sagemaker''': '''sagemaker>=2.31.0''',
    '''scikit-learn''': '''scikit-learn''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''sigopt''': '''sigopt''',
    '''starlette''': '''starlette''',
    '''sudachipy''': '''sudachipy>=0.6.6''',
    '''sudachidict_core''': '''sudachidict_core>=20220729''',
    '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
    '''tensorflow''': '''tensorflow>=2.6,<2.14''',
    '''tensorflow-text''': '''tensorflow-text<2.14''',
    '''tf2onnx''': '''tf2onnx''',
    '''timeout-decorator''': '''timeout-decorator''',
    '''timm''': '''timm''',
    '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
    '''torch''': '''torch>=1.9,!=1.12.0''',
    '''torchaudio''': '''torchaudio''',
    '''torchvision''': '''torchvision''',
    '''pyctcdecode''': '''pyctcdecode>=0.4.0''',
    '''tqdm''': '''tqdm>=4.27''',
    '''unidic''': '''unidic>=1.0.2''',
    '''unidic_lite''': '''unidic_lite>=1.0.7''',
    '''urllib3''': '''urllib3<2.0.0''',
    '''uvicorn''': '''uvicorn''',
}
43
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
0
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Dict = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class snake_case_( _a ): __UpperCamelCase = """encodec""" def __init__( self : Any , UpperCamelCase_ : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCamelCase_ : str=2_4_0_0_0 , UpperCamelCase_ : List[Any]=1 , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Optional[Any]=1_2_8 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : str=[8, 5, 4, 2] , UpperCamelCase_ : Union[str, Any]="weight_norm" , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=True , UpperCamelCase_ : str="reflect" , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : str=1.0 , UpperCamelCase_ : str=1_0_2_4 , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=True , **UpperCamelCase_ : Dict , ): lowerCAmelCase : Any = target_bandwidths lowerCAmelCase : str = sampling_rate lowerCAmelCase : Union[str, Any] = audio_channels lowerCAmelCase : Optional[int] = normalize lowerCAmelCase : Optional[Any] = chunk_length_s lowerCAmelCase : str = overlap lowerCAmelCase : List[str] = hidden_size lowerCAmelCase : List[str] = num_filters lowerCAmelCase : Union[str, Any] = num_residual_layers lowerCAmelCase : Tuple = upsampling_ratios lowerCAmelCase : Tuple = norm_type lowerCAmelCase : List[str] = kernel_size lowerCAmelCase : Tuple = last_kernel_size lowerCAmelCase : Union[str, Any] = residual_kernel_size lowerCAmelCase : Union[str, Any] = 
dilation_growth_rate lowerCAmelCase : Any = use_causal_conv lowerCAmelCase : str = pad_mode lowerCAmelCase : Dict = compress lowerCAmelCase : Union[str, Any] = num_lstm_layers lowerCAmelCase : List[Any] = trim_right_ratio lowerCAmelCase : str = codebook_size lowerCAmelCase : Optional[int] = codebook_dim if codebook_dim is not None else hidden_size lowerCAmelCase : int = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**__UpperCAmelCase ) @property def lowerCamelCase__ ( self : Union[str, Any] ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowerCamelCase__ ( self : Union[str, Any] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[int] = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def lowerCamelCase__ ( self : List[str] ): return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
60
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration for RoBERTa models (BERT architecture, different tokenizer/ids)."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for ONNX export (extra 'choice' axis for multiple-choice)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
130
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""simple docstring""" def _lowerCAmelCase ( lowercase_ , lowercase_ ): return number | (1 << position) def _lowerCAmelCase ( lowercase_ , lowercase_ ): return number & ~(1 << position) def _lowerCAmelCase ( lowercase_ , lowercase_ ): return number ^ (1 << position) def _lowerCAmelCase ( lowercase_ , lowercase_ ): return ((number >> position) & 1) == 1 def _lowerCAmelCase ( lowercase_ , lowercase_ ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
78
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A : Tuple = logging.get_logger(__name__) A : Dict = { "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json", } class _UpperCamelCase ( _a ): '''simple docstring''' __UpperCAmelCase : Optional[Any] ="""lxmert""" __UpperCAmelCase : str ={} def __init__( self , __a=3_05_22 , __a=7_68 , __a=12 , __a=95_00 , __a=16_00 , __a=4_00 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.0_2 , __a=1e-1_2 , __a=9 , __a=5 , __a=5 , __a=20_48 , __a=4 , __a=6.6_7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=True , __a=True , **__a , ): __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = num_qa_labels __lowerCAmelCase = num_object_labels __lowerCAmelCase = num_attr_labels __lowerCAmelCase = l_layers __lowerCAmelCase = x_layers __lowerCAmelCase = r_layers __lowerCAmelCase = visual_feat_dim __lowerCAmelCase = visual_pos_dim __lowerCAmelCase = visual_loss_normalizer __lowerCAmelCase = task_matched __lowerCAmelCase = task_mask_lm __lowerCAmelCase = task_obj_predict __lowerCAmelCase = task_qa __lowerCAmelCase = visual_obj_loss __lowerCAmelCase = visual_attr_loss __lowerCAmelCase = visual_feat_loss __lowerCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**__UpperCAmelCase )
57
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
def lowerCAmelCase_(min_total: int = 10**12) -> int:
    """Smallest total number of discs >= a bound for which P(two blue) == 1/2.

    Uses the Pell-like recurrence over convergents (numerator, denominator)
    until the denominator corresponds to a total of at least ``min_total``
    discs, then converts the final denominator back to a disc count.

    Args:
        min_total: lower bound on the total number of discs (default 10**12).

    Returns:
        The number of blue discs in the first qualifying arrangement.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    # Each step jumps to the next solution of the underlying Pell equation.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{lowerCAmelCase_() = }")
281
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
from string import ascii_uppercase

# Forward map 'A'..'Z' -> 0..25 and inverse map 0..25 -> 'A'..'Z'.
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat ``key`` cyclically until it is exactly as long as ``message``."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt ``message`` (uppercase A-Z plus spaces) with running key ``key_new``.

    Spaces pass through unchanged and do not consume a key letter.
    """
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            # Subtract the key letter modulo 26.
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Invert :func:`cipher_text` for ``cipher`` using the same running key."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            # Add the key letter back modulo 26 (the +26 keeps the sum positive).
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    """Demonstrate encryption and decryption round-trip on a sample message."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
312
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of ``sequence`` using backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursive backtracking helper.

    Extends ``current_sequence`` by one unused element at a time; when the
    partial permutation reaches full length it is printed.
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Backtrack: undo the choice before trying the next element.
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
76
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class __UpperCAmelCase : def __init__( self : int, __A : Union[str, Any], ): UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = 1_3 UpperCAmelCase : Optional[int] = 7 UpperCAmelCase : Tuple = True UpperCAmelCase : str = True UpperCAmelCase : Tuple = False UpperCAmelCase : List[str] = True UpperCAmelCase : Dict = 9_9 UpperCAmelCase : int = 3_2 UpperCAmelCase : Any = 2 UpperCAmelCase : int = 4 UpperCAmelCase : Optional[Any] = 3_7 UpperCAmelCase : int = "gelu" UpperCAmelCase : int = 0.1 UpperCAmelCase : int = 0.1 UpperCAmelCase : Optional[int] = 5_1_2 UpperCAmelCase : int = 1_6 UpperCAmelCase : Tuple = 2 UpperCAmelCase : str = 0.0_2 UpperCAmelCase : List[str] = 3 UpperCAmelCase : Optional[Any] = 4 UpperCAmelCase : List[Any] = None def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[Any] = None UpperCAmelCase : int = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : Any = ids_tensor([self.batch_size, 
self.seq_length], self.num_labels ) UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_choices ) UpperCAmelCase : Optional[int] = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : int, __A : List[str], __A : List[Any], __A : int, __A : Optional[Any], __A : Dict, __A : List[str] ): UpperCAmelCase : Optional[Any] = TFDistilBertModel(config=__UpperCAmelCase ) UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase : List[Any] = model(__UpperCAmelCase ) UpperCAmelCase : List[Any] = [input_ids, input_mask] UpperCAmelCase : Any = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : str, __A : str, __A : Optional[Any], __A : Union[str, Any], __A : int, __A : Tuple, __A : int ): UpperCAmelCase : Optional[int] = TFDistilBertForMaskedLM(config=__UpperCAmelCase ) UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase : int = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Union[str, Any], __A : Any, __A : Union[str, Any], __A : List[Any], __A : Optional[Any], __A : Optional[Any], __A : str ): UpperCAmelCase : Optional[int] = TFDistilBertForQuestionAnswering(config=__UpperCAmelCase ) UpperCAmelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, } UpperCAmelCase : List[str] = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def __magic_name__ ( self : Tuple, __A : str, __A : Optional[int], __A : Union[str, Any], __A : Dict, __A : Optional[int], __A : Optional[Any] ): UpperCAmelCase : int = self.num_labels UpperCAmelCase : Optional[int] = TFDistilBertForSequenceClassification(__UpperCAmelCase ) UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase : Dict = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __magic_name__ ( self : Dict, __A : Optional[int], __A : str, __A : str, __A : Any, __A : List[str], __A : List[str] ): UpperCAmelCase : str = self.num_choices UpperCAmelCase : Optional[Any] = TFDistilBertForMultipleChoice(__UpperCAmelCase ) UpperCAmelCase : Any = tf.tile(tf.expand_dims(__UpperCAmelCase, 1 ), (1, self.num_choices, 1) ) UpperCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(__UpperCAmelCase, 1 ), (1, self.num_choices, 1) ) UpperCAmelCase : str = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } UpperCAmelCase : Tuple = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : Tuple, __A : Union[str, Any], __A : Optional[int], __A : Any, __A : List[Any] ): UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : Dict = TFDistilBertForTokenClassification(__UpperCAmelCase ) UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase : Tuple = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Any ): UpperCAmelCase : List[str] = self.prepare_config_and_inputs() (UpperCAmelCase) : Optional[Any] = 
config_and_inputs UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class __UpperCAmelCase ( _a , _a , unittest.TestCase ): UpperCamelCase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) UpperCamelCase = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : str ): UpperCAmelCase : int = TFDistilBertModelTester(self ) UpperCAmelCase : Any = ConfigTester(self, config_class=__UpperCAmelCase, dim=3_7 ) def __magic_name__ ( self : Optional[Any] ): self.config_tester.run_common_tests() def __magic_name__ ( self : Tuple ): UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__UpperCAmelCase ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCAmelCase ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCAmelCase ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCAmelCase ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCAmelCase ) @slow def __magic_name__ ( self : Optional[Any] ): for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): UpperCAmelCase : Tuple = TFDistilBertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf class __UpperCAmelCase ( unittest.TestCase ): @slow def __magic_name__ ( self : Tuple ): UpperCAmelCase : List[str] = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) UpperCAmelCase : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] UpperCAmelCase : Union[str, Any] = [1, 6, 7_6_8] self.assertEqual(output.shape, __UpperCAmelCase ) UpperCAmelCase : Optional[int] = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3], __UpperCAmelCase, atol=1E-4 )
336
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
"""simple docstring""" from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowercase__ = datasets.utils.logging.get_logger(__name__) class __lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ): '''simple docstring''' a_ : bool = None a_ : bool = None class __lowerCamelCase ( folder_based_builder.FolderBasedBuilder ): '''simple docstring''' a_ : Any = datasets.Audio() a_ : str = """audio""" a_ : Optional[int] = AudioFolderConfig a_ : List[str] # definition at the bottom of the script a_ : str = AudioClassification(audio_column="""audio""" , label_column="""label""" ) lowercase__ = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] lowercase__ = AUDIO_EXTENSIONS
241
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
def factorial(num: int) -> int:
    """Return ``num!`` computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of ``num!`` (Project-Euler-20 style)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
325
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[str] = int(A_ ) if decimal in (0, 1): # Exit cases for the recursion return str(A_ ) __UpperCamelCase :int = divmod(A_ , 2 ) return binary_recursive(A_ ) + str(A_ ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Optional[int] = str(A_ ).strip() if not number: raise ValueError('''No input value was provided''' ) __UpperCamelCase :Any = "-" if number.startswith('''-''' ) else "" __UpperCamelCase :Optional[int] = number.lstrip('''-''' ) if not number.isnumeric(): raise ValueError('''Input value is not an integer''' ) return f"""{negative}0b{binary_recursive(int(A_ ) )}""" if __name__ == "__main__": from doctest import testmod testmod()
43
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_: def __init__( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=1_3 , UpperCamelCase_ : Optional[Any]=3_2 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=1_6 , UpperCamelCase_ : int=[1, 2, 1] , UpperCamelCase_ : Union[str, Any]=[2, 2, 4] , UpperCamelCase_ : int=2 , UpperCamelCase_ : int=2.0 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Dict=1E-5 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : str=1_0 , UpperCamelCase_ : Tuple=8 , ): lowerCAmelCase : List[Any] = parent lowerCAmelCase : str = batch_size lowerCAmelCase : int = image_size lowerCAmelCase : Union[str, Any] = patch_size lowerCAmelCase : Dict = num_channels lowerCAmelCase : List[Any] = embed_dim lowerCAmelCase : Optional[int] = depths lowerCAmelCase : List[Any] = num_heads lowerCAmelCase : 
Dict = window_size lowerCAmelCase : Tuple = mlp_ratio lowerCAmelCase : Union[str, Any] = qkv_bias lowerCAmelCase : List[str] = hidden_dropout_prob lowerCAmelCase : List[Any] = attention_probs_dropout_prob lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : Optional[int] = hidden_act lowerCAmelCase : Optional[Any] = use_absolute_embeddings lowerCAmelCase : Optional[int] = patch_norm lowerCAmelCase : Optional[Any] = layer_norm_eps lowerCAmelCase : Any = initializer_range lowerCAmelCase : Optional[Any] = is_training lowerCAmelCase : List[str] = scope lowerCAmelCase : int = use_labels lowerCAmelCase : Tuple = type_sequence_label_size lowerCAmelCase : int = encoder_stride def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : List[Any] = None if self.use_labels: lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : List[str] ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) 
model.eval() lowerCAmelCase : str = model(__UpperCAmelCase ) lowerCAmelCase : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int ): lowerCAmelCase : List[Any] = SwinvaForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase : Optional[int] = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase : str = 1 lowerCAmelCase : Optional[Any] = SwinvaForMaskedImageModeling(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase : Optional[int] = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : str = self.type_sequence_label_size lowerCAmelCase : Union[str, Any] = SwinvaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() lowerCAmelCase : Dict = config_and_inputs lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case_( _a , _a , 
unittest.TestCase ): __UpperCamelCase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __UpperCamelCase = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[int] = SwinvaModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=3_7 ) def lowerCamelCase__ ( self : int ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def lowerCamelCase__ ( self : List[Any] ): pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def lowerCamelCase__ ( self : Optional[int] ): pass def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Tuple = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowerCAmelCase : str = model_class(__UpperCAmelCase ) lowerCAmelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : List[str] = [*signature.parameters.keys()] lowerCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = True for model_class in self.all_model_classes: lowerCAmelCase : str = True lowerCAmelCase : Tuple = False lowerCAmelCase : int = True lowerCAmelCase : Tuple = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): lowerCAmelCase : Dict = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase : int = outputs.attentions lowerCAmelCase : List[Any] = len(self.model_tester.depths ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase : Dict = True lowerCAmelCase : Union[str, Any] = config.window_size**2 lowerCAmelCase : List[str] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): lowerCAmelCase : Dict = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase : Union[str, Any] = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase : str = len(__UpperCAmelCase ) # Check attention is always last and order is fine lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : List[str] = True lowerCAmelCase : Tuple = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): lowerCAmelCase : Union[str, Any] = 
model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) if hasattr(self.model_tester , '''num_hidden_states_types''' ): lowerCAmelCase : Union[str, Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase : Any = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) ) lowerCAmelCase : Any = outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): lowerCAmelCase : Any = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase : Optional[int] = outputs.hidden_states lowerCAmelCase : Optional[int] = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # Swinv2 has a different seq_length lowerCAmelCase : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) lowerCAmelCase : str = reshaped_hidden_states[0].shape lowerCAmelCase : str = ( reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( 
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase : str = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Optional[Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Dict = 3 lowerCAmelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase : Tuple = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) ) def 
lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def lowerCamelCase__ ( self : List[str] ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Optional[Any] = SwinvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[Any] = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: lowerCAmelCase : int = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : int ): return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( __UpperCAmelCase ) lowerCAmelCase : Union[str, Any] = self.default_image_processor lowerCAmelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase : str = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): lowerCAmelCase : Any = 
model(**__UpperCAmelCase ) # verify the logits lowerCAmelCase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) lowerCAmelCase : Dict = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
60
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
from timeit import timeit lowerCAmelCase__ = { '''MALAYALAM''': True, '''String''': False, '''rotor''': True, '''level''': True, '''A''': True, '''BB''': True, '''ABC''': False, '''amanaplanacanalpanama''': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : List[str] = 0 lowercase__ : Optional[Any] = len(A_ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : List[Any] = len(A_ ) // 2 lowercase__ : Optional[Any] = len(A_ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(A_ ) ) def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if len(A_ ) <= 2: return True if s[0] == s[len(A_ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return s == s[::-1] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Tuple = F"""all({name}(key) is value for key, value in test_data.items())""" lowercase__ : Union[str, Any] = F"""from __main__ import test_data, {name}""" lowercase__ : List[str] = 500_000 lowercase__ : Optional[int] = timeit(stmt=A_ , setup=A_ , number=A_ ) print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f'''{key:21} {value}''') print('''a man a plan a canal panama''') # finished 500,000 runs in 
0.46793 seconds benchmark_function('''is_palindrome_slice''') # finished 500,000 runs in 0.85234 seconds benchmark_function('''is_palindrome''') # finished 500,000 runs in 1.32028 seconds benchmark_function('''is_palindrome_recursive''') # finished 500,000 runs in 2.08679 seconds benchmark_function('''is_palindrome_traversal''')
130
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
"""simple docstring""" from __future__ import annotations from random import random class A_ : """simple docstring""" def __init__( self :List[str] , lowercase_ :int | None = None ) -> Optional[Any]: UpperCAmelCase = value UpperCAmelCase = random() UpperCAmelCase = None UpperCAmelCase = None def __repr__( self :str ) -> Tuple: from pprint import pformat if self.left is None and self.right is None: return f"""\'{self.value}: {self.prior:.5}\'""" else: return pformat( {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 ) def __str__( self :str ) -> Union[str, Any]: UpperCAmelCase = str(self.value ) + " " UpperCAmelCase = str(self.left or '' ) UpperCAmelCase = str(self.right or '' ) return value + left + right def _lowerCAmelCase ( lowercase_ , lowercase_ ): if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: UpperCAmelCase = split(root.left , A_ ) return left, root else: UpperCAmelCase = split(root.right , A_ ) return root, right def _lowerCAmelCase ( lowercase_ , lowercase_ ): if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: UpperCAmelCase = merge(left.right , A_ ) return left else: UpperCAmelCase = merge(A_ , right.left ) return right def _lowerCAmelCase ( lowercase_ , lowercase_ ): UpperCAmelCase = Node(A_ ) UpperCAmelCase = split(A_ , A_ ) return merge(merge(A_ , A_ ) , A_ ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): UpperCAmelCase = split(A_ , value - 1 ) UpperCAmelCase = split(A_ , A_ ) return merge(A_ , A_ ) def _lowerCAmelCase ( lowercase_ ): if not root: # None return else: inorder(root.left ) print(root.value , end=',' ) inorder(root.right ) def _lowerCAmelCase ( lowercase_ , lowercase_ ): for arg in args.split(): if arg[0] == "+": UpperCAmelCase = insert(A_ , int(arg[1:] ) ) elif arg[0] == "-": UpperCAmelCase = erase(A_ , int(arg[1:] ) ) else: print('Unknown command' ) 
return root def _lowerCAmelCase ( ): UpperCAmelCase = None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) UpperCAmelCase = input() while args != "q": UpperCAmelCase = interact_treap(A_ , A_ ) print(A_ ) UpperCAmelCase = input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
78
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() 
model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, 
-8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __lowerCAmelCase = n - k # Calculate C(n,k) for i in range(A_ ): result *= n - i result //= i + 1 return result def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return binomial_coefficient(2 * node_count , A_ ) // (node_count + 1) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if n < 0: raise ValueError("factorial() not defined for negative values" ) __lowerCAmelCase = 1 for i in range(1 , n + 1 ): result *= i return result def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return catalan_number(A_ ) * factorial(A_ ) if __name__ == "__main__": A : int = int(input("Enter the number of nodes: ").strip() or 0) if node_count <= 0: raise ValueError("We need some nodes to work with.") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
57
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
"""Convert Swin Transformer V2 checkpoints from the timm library to Hugging Face format."""

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    """Derive a :class:`Swinv2Config` from a timm model name such as
    ``swinv2_tiny_patch4_window8_256``.

    NOTE(review): config attribute targets were reconstructed from the
    upstream conversion script — confirm against it.
    """
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    # image size: 'to' marks a fine-tuned-to-resolution name (e.g. window12to16_192to256)
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map one timm parameter name to its Hugging Face equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rename keys and split fused qkv weights to match the HF model layout.

    NOTE(review): the q/k/v destination key names were reconstructed from the
    upstream script (the mangled source lost the assignment targets) — verify.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # relative-position masks are recomputed by the HF model
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    """Convert a timm Swin-v2 checkpoint, verify logits against timm, save, and push."""
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(f'''Saving model {swinv2_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
281
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
0
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base feature extractor for speech/sequence inputs, adding pad/truncate support."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        # Per-frame feature dimensionality (1 for raw waveforms, >1 for e.g. spectrograms)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        # Value used to fill padded positions
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of extracted features.

        Returns a :class:`BatchFeature` whose sequences all share one length,
        optionally converted to the requested tensor type.
        """
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f''' to this method that includes {self.model_input_names[0]}, but you provided'''
                f''' {list(processed_features.keys())}'''
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f'''type of {first_element} unknown: {type(first_element)}. '''
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # NOTE(review): downcast reconstructed as float64 -> float32 (mangled
                # source shows `np.floataa` on both sides) — confirm against upstream.
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad a single example (not a batch) to `max_length` on the configured side."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single example to `max_length` (rounded up to a multiple if requested)."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing `padding` argument into a PaddingStrategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined'''
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
312
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
0
def lowerCamelCase__(_a):
    """Return the longest palindromic substring of *_a*.

    Uses Manacher's algorithm. Odd- and even-length palindromes are
    handled uniformly by interleaving a ``|`` separator between the
    characters ("aba" -> "a|b|a").
    """
    if not _a:
        # Guard: the separator-building step below indexes _a[-1].
        return ""

    max_length = 0
    output_string = ""

    # append each character + "|" for all but the last character
    new_input_string = ""
    for ch in _a[: len(_a) - 1]:
        new_input_string += ch + "|"
    # append last character
    new_input_string += _a[-1]

    # [l, r] is the right-most palindromic window found so far.
    l, r = 0, 0  # noqa: E741
    # length[j] is the palindrome length centred at j in new_input_string.
    length = [1 for _ in range(len(new_input_string))]

    start = 0
    for j in range(len(new_input_string)):
        # Seed the expansion from the mirrored centre inside [l, r],
        # so already-known matches are not re-compared.
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # Does this palindrome end after the previously explored end r?
        # If yes, advance the window to its boundaries.
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # Track the overall best centre.
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # Rebuild the answer from the winning window, dropping separators.
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for ch in s:
        if ch != "|":
            output_string += ch
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def 
__snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''') raise
40
0
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase ( _a ): def __init__( self : Union[str, Any], __A : Dict, __A : Any=1_3, __A : Optional[int]=7, __A : List[Any]=True, __A : Dict=True, __A : str=True, __A : Union[str, Any]=True, __A : Union[str, Any]=9_9, __A : Any=3_2, __A : Optional[Any]=5, __A : Dict=4, __A : str=3_7, __A : str="gelu", __A : List[str]=0.1, __A : int=0.1, __A : str=5_1_2, __A : int=1_6, __A : Dict=2, __A : Optional[Any]=0.0_2, __A : str=False, __A : Any=True, __A : Optional[int]="None", __A : Tuple=3, __A : Dict=4, __A : List[Any]=None, ): UpperCAmelCase : Tuple = parent UpperCAmelCase : Dict = batch_size UpperCAmelCase : List[Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Optional[Any] = use_input_mask UpperCAmelCase : List[Any] = use_token_type_ids UpperCAmelCase : Any = use_labels UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Union[str, Any] = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : str = hidden_act UpperCAmelCase : Optional[int] = hidden_dropout_prob UpperCAmelCase : Any = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : str = type_vocab_size UpperCAmelCase : List[Any] = type_sequence_label_size 
UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Any = num_labels UpperCAmelCase : Dict = num_choices UpperCAmelCase : Any = relative_attention UpperCAmelCase : Union[str, Any] = position_biased_input UpperCAmelCase : Tuple = pos_att_type UpperCAmelCase : List[Any] = scope def __magic_name__ ( self : int ): UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) UpperCAmelCase : Dict = None if self.use_token_type_ids: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase : Optional[Any] = None UpperCAmelCase : Dict = None UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size], self.num_choices ) UpperCAmelCase : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : Tuple ): return DebertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, ) def __magic_name__ ( self : Tuple ): UpperCAmelCase : Any = self.get_config() UpperCAmelCase : int = 
3_0_0 return config def __magic_name__ ( self : Dict, __A : List[str] ): self.parent.assertListEqual(list(result.loss.size() ), [] ) def __magic_name__ ( self : Any, __A : str, __A : Tuple, __A : Optional[int], __A : str, __A : Dict, __A : str, __A : Optional[int] ): UpperCAmelCase : Dict = DebertaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase : Optional[int] = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase )[0] UpperCAmelCase : Any = model(__UpperCAmelCase, token_type_ids=__UpperCAmelCase )[0] UpperCAmelCase : Any = model(__UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ), [self.batch_size, self.seq_length, self.hidden_size] ) def __magic_name__ ( self : Optional[Any], __A : Union[str, Any], __A : List[Any], __A : Union[str, Any], __A : int, __A : Any, __A : Any, __A : int ): UpperCAmelCase : Any = DebertaForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : List[Any], __A : List[str], __A : Optional[int], __A : Optional[int], __A : int, __A : Any, __A : Optional[Any], __A : Union[str, Any] ): UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Union[str, Any] = DebertaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase : Tuple = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, labels=__UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ), [self.batch_size, self.num_labels] ) self.check_loss_output(__UpperCAmelCase ) def __magic_name__ ( self : Dict, __A : int, __A : Dict, __A : Optional[Any], __A : List[str], __A : Optional[int], __A : 
Any, __A : str ): UpperCAmelCase : Optional[Any] = self.num_labels UpperCAmelCase : Tuple = DebertaForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Tuple, __A : List[str], __A : Dict, __A : Union[str, Any], __A : Optional[int], __A : List[str], __A : str, __A : Optional[Any] ): UpperCAmelCase : Optional[int] = DebertaForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase : Optional[int] = model( __UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, start_positions=__UpperCAmelCase, end_positions=__UpperCAmelCase, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def __magic_name__ ( self : int ): UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( UpperCAmelCase ) : Tuple = config_and_inputs UpperCAmelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __UpperCAmelCase ( _a , _a , unittest.TestCase ): UpperCamelCase = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) 
UpperCamelCase = True UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def __magic_name__ ( self : List[str] ): UpperCAmelCase : int = DebertaModelTester(self ) UpperCAmelCase : Optional[int] = ConfigTester(self, config_class=__UpperCAmelCase, hidden_size=3_7 ) def __magic_name__ ( self : Tuple ): self.config_tester.run_common_tests() def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase ) def __magic_name__ ( self : Union[str, Any] ): UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase ) def __magic_name__ ( self : int ): UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase ) def __magic_name__ ( self : str ): UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase ) @slow def __magic_name__ ( self : str ): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : str = DebertaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCAmelCase ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def __magic_name__ ( self : int ): pass @slow def __magic_name__ ( self : int ): UpperCAmelCase : Dict = DebertaModel.from_pretrained('''microsoft/deberta-base''' ) UpperCAmelCase : Optional[Any] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 
1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase : int = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase )[0] # compare the actual values for a slice. UpperCAmelCase : Optional[int] = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], __UpperCAmelCase, atol=1E-4 ), F'''{output[:, 1:4, 1:4]}''' )
336
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a 
: Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
0
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @property def lowerCamelCase ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : Optional[int] = ort.SessionOptions() lowerCAmelCase_ : str = False return options def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) lowerCAmelCase_ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) lowerCAmelCase_ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default lowerCAmelCase_ : List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase_ : int = "A red cat sitting on a park bench" lowerCAmelCase_ : Optional[Any] = np.random.RandomState(0 ) lowerCAmelCase_ : Optional[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 
, generator=__UpperCAmelCase , output_type="np" , ) lowerCAmelCase_ : str = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-2
241
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Map of submodule -> public names, consumed by _LazyModule below so that
# heavy submodules are only imported on first attribute access.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only exposed when torch is installed.
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only exposed when TF is installed.
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the eager imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # the real submodule import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
325
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
0
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Any = filter(lambda SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() ) __UpperCamelCase :Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] ) return params __lowercase = logging.getLogger(__name__) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' if metric == "rouge2": __UpperCamelCase :Union[str, Any] = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __UpperCamelCase :Any = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __UpperCamelCase :Optional[int] = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": __UpperCamelCase :Optional[Any] = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) __UpperCamelCase :Union[str, Any] = ModelCheckpoint( dirpath=A_ , filename=A_ , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' return EarlyStopping( monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=A_ , verbose=A_ , ) class lowerCamelCase_ ( pl.Callback ): '''simple docstring''' def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[Any]: __UpperCamelCase :Optional[Any] = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(__UpperCAmelCase) @rank_zero_only def UpperCamelCase__ ( self , __lowercase , __lowercase , 
__lowercase , __lowercase=True) -> int: logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""") __UpperCamelCase :Dict = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']}) # Log results __UpperCamelCase :Optional[Any] = Path(pl_module.hparams.output_dir) if type_path == "test": __UpperCamelCase :str = od / "test_results.txt" __UpperCamelCase :str = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. __UpperCamelCase :str = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" __UpperCamelCase :List[Any] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=__UpperCAmelCase) generations_file.parent.mkdir(exist_ok=__UpperCAmelCase) with open(__UpperCAmelCase , '''a+''') as writer: for key in sorted(__UpperCAmelCase): if key in ["log", "progress_bar", "preds"]: continue __UpperCamelCase :List[str] = metrics[key] if isinstance(__UpperCAmelCase , torch.Tensor): __UpperCamelCase :str = val.item() __UpperCamelCase :Dict = f"""{key}: {val:.6f}\n""" writer.write(__UpperCAmelCase) if not save_generations: return if "preds" in metrics: __UpperCamelCase :Any = "\n".join(metrics['''preds''']) generations_file.open('''w+''').write(__UpperCAmelCase) @rank_zero_only def UpperCamelCase__ ( self , __lowercase , __lowercase) -> int: try: __UpperCamelCase :Optional[Any] = pl_module.model.model.num_parameters() except AttributeError: __UpperCamelCase :Any = pl_module.model.num_parameters() __UpperCamelCase :Dict = count_trainable_parameters(__UpperCAmelCase) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6}) @rank_zero_only def UpperCamelCase__ ( self , __lowercase , 
__lowercase) -> int: save_json(pl_module.metrics , pl_module.metrics_save_path) return self._write_logs(__UpperCAmelCase , __UpperCAmelCase , '''test''') @rank_zero_only def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Any: save_json(pl_module.metrics , pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
43
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
0
"""simple docstring""" from typing import Any class snake_case_: def __init__( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Optional[Any] = data lowerCAmelCase : Optional[int] = None def __repr__( self : str ): return F'''Node({self.data})''' class snake_case_: def __init__( self : str ): lowerCAmelCase : Dict = None def __iter__( self : int ): lowerCAmelCase : str = self.head while node: yield node.data lowerCAmelCase : Union[str, Any] = node.next def __len__( self : Any ): return sum(1 for _ in self ) def __repr__( self : Any ): return "->".join([str(__UpperCAmelCase ) for item in self] ) def __getitem__( self : int , UpperCamelCase_ : int ): if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any ): if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) lowerCAmelCase : List[Any] = self.head for _ in range(__UpperCAmelCase ): lowerCAmelCase : Dict = current.next lowerCAmelCase : Optional[Any] = data def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Any ): self.insert_nth(len(self ) , __UpperCAmelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any ): self.insert_nth(0 , __UpperCAmelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Any ): if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) lowerCAmelCase : Any = Node(__UpperCAmelCase ) if self.head is None: lowerCAmelCase : int = new_node elif index == 0: lowerCAmelCase : Dict = self.head # link new_node to head lowerCAmelCase : Union[str, Any] = new_node else: lowerCAmelCase : List[str] = self.head for _ in range(index - 1 ): lowerCAmelCase : int = temp.next lowerCAmelCase : Optional[int] = temp.next lowerCAmelCase : int = new_node def lowerCamelCase__ ( self : int ): # print every node data print(self ) 
def lowerCamelCase__ ( self : Optional[Any] ): return self.delete_nth(0 ) def lowerCamelCase__ ( self : Any ): # delete from tail return self.delete_nth(len(self ) - 1 ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int = 0 ): if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) lowerCAmelCase : Optional[Any] = self.head # default first node if index == 0: lowerCAmelCase : int = self.head.next else: lowerCAmelCase : List[str] = self.head for _ in range(index - 1 ): lowerCAmelCase : int = temp.next lowerCAmelCase : Any = temp.next lowerCAmelCase : str = temp.next.next return delete_node.data def lowerCamelCase__ ( self : int ): return self.head is None def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = None lowerCAmelCase : Tuple = self.head while current: # Store the current node's next node. lowerCAmelCase : Any = current.next # Make the current node's next point backwards lowerCAmelCase : Any = prev # Make the previous node be the current node lowerCAmelCase : Optional[int] = current # Make the current node the next node (to progress iteration) lowerCAmelCase : int = next_node # Return prev in order to put the head at the end lowerCAmelCase : Dict = prev def _snake_case ( ): lowerCAmelCase : Any = LinkedList() assert linked_list.is_empty() is True assert str(A_ ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(A_ ) == i linked_list.insert_nth(A_ , i + 1 ) assert str(A_ ) == "->".join(str(A_ ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(A_ ) == "->".join(str(A_ ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(A_ ) == 9 assert str(A_ ) == "->".join(str(A_ ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase : Optional[int] = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(A_ ) == "->".join(str(A_ ) for i in range(-8 , 1 ) ) def _snake_case ( ): lowerCAmelCase : str = [ -9, 100, Node(77345112 ), "dlrow olleH", 7, 5555, 0, -192.55555, "Hello, world!", 77.9, Node(10 ), None, None, 12.20, ] lowerCAmelCase : str = LinkedList() for i in test_input: linked_list.insert_tail(A_ ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(A_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase : int = linked_list.delete_head() assert result == -9 assert ( str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase : int = linked_list.delete_tail() assert result == 12.2 assert ( str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase : Union[str, Any] = linked_list.delete_nth(10 ) assert result is None assert ( str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert 
( str(A_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(A_ ) assert ( str(A_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(A_ ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def _snake_case ( ): from doctest import testmod testmod() lowerCAmelCase : int = LinkedList() linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(A_ ) print('''\nReading/changing Node data using indexing:''' ) print(f'''Element at Position 1: {linked_list[1]}''' ) lowerCAmelCase : Tuple = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(A_ ) print(f'''length of linked_list is : {len(A_ )}''' ) if __name__ == "__main__": main()
60
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '''▁''' lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} lowerCAmelCase__ = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } lowerCAmelCase__ = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } lowerCAmelCase__ = { '''ernie-m-base''': 5_1_4, '''ernie-m-large''': 5_1_4, } lowerCAmelCase__ = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class snake_case__(_a ): """simple docstring""" lowercase_ = ["input_ids"] lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = RESOURCE_FILES_NAMES def __init__( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Tuple="utf8" , SCREAMING_SNAKE_CASE : List[str]="[UNK]" , SCREAMING_SNAKE_CASE : Dict="[SEP]" , SCREAMING_SNAKE_CASE : Any="[PAD]" , SCREAMING_SNAKE_CASE : str="[CLS]" , SCREAMING_SNAKE_CASE : Optional[Any]="[MASK]" , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : Dict , ): # Mask token 
behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowercase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , vocab_file=__UpperCAmelCase , encoding=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) lowercase__ : str = do_lower_case lowercase__ : Dict = sentencepiece_model_ckpt lowercase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowercase__ : Any = self.load_vocab(filepath=__UpperCAmelCase ) else: lowercase__ : int = {self.sp_model.id_to_piece(__UpperCAmelCase ): id for id in range(self.sp_model.get_piece_size() )} lowercase__ : int = {v: k for k, v in self.vocab.items()} def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[Any] ): if text is None: return None lowercase__ : Any = self.tokenize(__UpperCAmelCase ) lowercase__ : str = "", [] for i, ch in enumerate(__UpperCAmelCase ): if ch in self.SP_CHAR_MAPPING: lowercase__ : List[Any] = self.SP_CHAR_MAPPING.get(__UpperCAmelCase ) else: lowercase__ : Union[str, Any] = unicodedata.normalize("NFKC" , __UpperCAmelCase ) if self.is_whitespace(__UpperCAmelCase ): continue normalized_text += ch char_mapping.extend([i] * len(__UpperCAmelCase ) ) lowercase__ : Optional[int] = normalized_text, [], 0 if self.do_lower_case: lowercase__ : List[str] = text.lower() for token in split_tokens: if token[:1] == "▁": lowercase__ : int = token[1:] lowercase__ : Tuple = text[offset:].index(__UpperCAmelCase ) + offset lowercase__ : List[str] = start + len(__UpperCAmelCase ) token_mapping.append((char_mapping[start], 
char_mapping[end - 1] + 1) ) lowercase__ : List[Any] = end return token_mapping @property def snake_case ( self : str ): return len(self.vocab ) def snake_case ( self : Optional[Any] ): return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : int ): lowercase__ : str = self.__dict__.copy() lowercase__ : Optional[Any] = None return state def __setstate__( self : str , SCREAMING_SNAKE_CASE : str ): lowercase__ : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase__ : Optional[int] = {} lowercase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[Any] ): return "".join((self.SP_CHAR_MAPPING.get(__UpperCAmelCase , __UpperCAmelCase ) for c in text) ) def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Optional[Any]=64 , SCREAMING_SNAKE_CASE : Any=0.1 ): if self.sp_model_kwargs.get("enable_sampling" ) is True: lowercase__ : Union[str, Any] = True if self.sp_model_kwargs.get("alpha" ) is not None: lowercase__ : Optional[int] = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: lowercase__ : Dict = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: lowercase__ : int = self.sp_model.EncodeAsPieces(__UpperCAmelCase ) else: lowercase__ : List[Any] = self.sp_model.SampleEncodeAsPieces(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase__ : Optional[Any] = [] for pi, piece in enumerate(__UpperCAmelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__UpperCAmelCase ) and pi != 0: new_pieces.append(__UpperCAmelCase ) continue else: continue lowercase__ : Any = 0 for i, chunk in enumerate(__UpperCAmelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__UpperCAmelCase ) or self.is_punct(__UpperCAmelCase ): if i 
> lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__UpperCAmelCase ) lowercase__ : Optional[int] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowercase__ : int = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowercase__ : Tuple = i if len(__UpperCAmelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase__ : str = "".join(__UpperCAmelCase ).replace(__UpperCAmelCase , " " ).strip() return out_string def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase__ : Tuple = self.convert_ids_to_tokens(__UpperCAmelCase ) lowercase__ : str = "".join(__UpperCAmelCase ).replace(__UpperCAmelCase , " " ).strip() return out_string def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ): return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Tuple ): return self.reverse_vocab.get(__UpperCAmelCase , self.unk_token ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any]=None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ : int = [self.cls_token_id] lowercase__ : Optional[int] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict=None ): if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , 
SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : int=False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ): # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__UpperCAmelCase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__UpperCAmelCase ) + 1) + [1] * (len(__UpperCAmelCase ) + 3) def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] ): if "\u4e00" <= char <= "\u9fff": return True return False def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] ): if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def snake_case ( self : int , SCREAMING_SNAKE_CASE : List[Any] ): if char in ",;:.?!~,;:。?!《》【】": return True return False def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] ): if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__UpperCAmelCase ) == 1: lowercase__ : Optional[Any] = unicodedata.category(__UpperCAmelCase ) if cat == "Zs": return True return False def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase__ : Optional[int] = {} with io.open(__UpperCAmelCase , "r" , encoding="utf-8" ) as f: for index, line in enumerate(__UpperCAmelCase ): lowercase__ : Tuple = line.rstrip("\n" ) lowercase__ : Any = int(__UpperCAmelCase ) return token_to_idx def snake_case ( self : Dict , 
SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ): lowercase__ : int = 0 if os.path.isdir(__UpperCAmelCase ): lowercase__ : Union[str, Any] = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: lowercase__ : Tuple = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" ) lowercase__ : Union[str, Any] = token_index writer.write(token + "\n" ) index += 1 lowercase__ : Union[str, Any] = os.path.join(__UpperCAmelCase , "sentencepiece.bpe.model" ) with open(__UpperCAmelCase , "wb" ) as fi: lowercase__ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (vocab_file,)
130
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[Any] , lowercase_ :Optional[Any] ) -> Any: UpperCAmelCase = val UpperCAmelCase = None UpperCAmelCase = None def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[Any] ) -> int: if self.val: if val < self.val: if self.left is None: UpperCAmelCase = Node(__UpperCAmelCase ) else: self.left.insert(__UpperCAmelCase ) elif val > self.val: if self.right is None: UpperCAmelCase = Node(__UpperCAmelCase ) else: self.right.insert(__UpperCAmelCase ) else: UpperCAmelCase = val def _lowerCAmelCase ( lowercase_ , lowercase_ ): if root: inorder(root.left , A_ ) res.append(root.val ) inorder(root.right , A_ ) def _lowerCAmelCase ( lowercase_ ): if len(A_ ) == 0: return arr UpperCAmelCase = Node(arr[0] ) for i in range(1 , len(A_ ) ): root.insert(arr[i] ) # Traverse BST in order. UpperCAmelCase = [] inorder(A_ , A_ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
78
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) __lowerCAmelCase = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": A : str = input("Enter a string ").strip() A : Dict = is_isogram(input_str) print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
57
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
"""Binomial coefficient C(n, k) via factorials."""
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the number of ways to choose ``k`` items from ``n``: n!/(k!(n-k)!).

    Raises ValueError when k < 0 or n < k (message kept from the original).
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # Integer division is exact here since k!(n-k)! always divides n!.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
281
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
"""Pytest checks that the deprecated metric APIs emit the evaluate-migration warning."""
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    # Reset the de-duplication set so the deprecation warning fires again.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        # Static canned listing; list_metrics() only needs objects with an `id`.
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        # Substitute the placeholder string with the real pytest tmp_path fixture.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # NOTE(review): the warning class was mangled in the original; FutureWarning
    # matches datasets' deprecation machinery — confirm against the library.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
312
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python SHA-256 (FIPS 180-4).

    The digest is computed eagerly in ``__init__`` and exposed via the ``hash``
    attribute as a 64-character lowercase hex string.

    Fixes over the previous revision: obfuscated local names (all values were
    assigned to the same identifier) and the undefined ``SHAaaa`` / ``main``
    references are restored to working code.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad *data* to a multiple of 64 bytes: 0x80, zeros, 64-bit bit-length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over each 64-byte block and set ``self.hash``."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Message schedule: 16 words from the block plus 48 expanded words.
            words = list(struct.unpack(">16L", block)) + [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(64):
                if index > 15:
                    # Expand the schedule (sigma0 / sigma1 mixing).
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression round.
                sum1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + sum1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                sum0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (sum0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Fold this block's result into the running hash state.
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Rotate a 32-bit *value* right by *rotations* bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Cross-check against hashlib's C implementation."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """CLI entry point: hash a string or the contents of a file."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # Hash input must be a bytestring.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
76
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCamelCase : Tuple = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[str] = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys _lowerCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
336
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" lowerCAmelCase_ : Tuple = FileLock(str(tmpdir / "foo.lock" ) ) lowerCAmelCase_ : Tuple = FileLock(str(tmpdir / "foo.lock" ) ) lowerCAmelCase_ : List[str] = 0.01 with locka.acquire(): with pytest.raises(A_ ): lowerCAmelCase_ : Tuple = time.time() locka.acquire(A_ ) assert time.time() - _start > timeout def __lowerCamelCase ( __UpperCamelCase ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : Optional[Any] = "a" * 1000 + ".lock" lowerCAmelCase_ : Dict = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(A_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 lowerCAmelCase_ : int = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(A_ ): locka.acquire(0 )
241
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
"""Processor that wraps an image processor and a tokenizer for Donut-style models.

Fix: the class previously inherited an undefined name, and several methods
shared one obfuscated identifier (later definitions silently shadowed earlier
ones); ``tokenajson`` was a typo for ``token2json``. Canonical names restored.
"""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process images and/or text; inside `as_target_processor` defer to it."""
        # For backward compatibility with the target-processor context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route ``__call__`` to the tokenizer for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence like ``<s_k>v</s_k>`` into JSON."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated field: drop the orphan start token and continue.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # Non-leaf node: recurse into the nested structure.
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:
                        # Leaf node: split on <sep/> and unwrap special tokens.
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
325
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if not isinstance(A_ , A_ ): raise ValueError('''Input must be an integer''' ) if input_num <= 0: raise ValueError('''Input must be positive''' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
43
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging snake_case__ : Union[str, Any] = logging.get_logger(__name__) class snake_case_( _a ): def __init__( self : str , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Union[str, Any] ): warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , __UpperCAmelCase , ) super().__init__(args=__UpperCAmelCase , **__UpperCAmelCase )
60
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class snake_case__: """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int = 13 , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 128 , SCREAMING_SNAKE_CASE : Dict=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE : int = 7 , SCREAMING_SNAKE_CASE : int = 4 , SCREAMING_SNAKE_CASE : int = 37 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 10 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 128 , SCREAMING_SNAKE_CASE : List[int] = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , ): lowercase__ : int = parent lowercase__ : List[str] = batch_size lowercase__ : List[Any] = image_size lowercase__ : Union[str, Any] = patch_size lowercase__ : Any = 
num_channels lowercase__ : Union[str, Any] = is_training lowercase__ : Union[str, Any] = use_labels lowercase__ : Tuple = hidden_size lowercase__ : Tuple = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : int = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : Optional[int] = attention_probs_dropout_prob lowercase__ : List[str] = type_sequence_label_size lowercase__ : str = initializer_range lowercase__ : Optional[int] = encoder_stride lowercase__ : Optional[int] = num_attention_outputs lowercase__ : Any = embed_dim lowercase__ : Dict = embed_dim + 1 lowercase__ : Union[str, Any] = resolution lowercase__ : Optional[int] = depths lowercase__ : Union[str, Any] = hidden_sizes lowercase__ : Dict = dim lowercase__ : List[str] = mlp_expansion_ratio def snake_case ( self : Dict ): lowercase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : str = None if self.use_labels: lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[str] = self.get_config() return config, pixel_values, labels def snake_case ( self : Optional[int] ): return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , 
SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ): lowercase__ : Optional[Any] = TFEfficientFormerModel(config=__UpperCAmelCase ) lowercase__ : int = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Optional[int] = self.type_sequence_label_size lowercase__ : Union[str, Any] = TFEfficientFormerForImageClassification(__UpperCAmelCase ) lowercase__ : Dict = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ : Union[str, Any] = 1 lowercase__ : Any = TFEfficientFormerForImageClassification(__UpperCAmelCase ) lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case ( self : Tuple ): lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() lowercase__ : List[str] = config_and_inputs lowercase__ : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case__(_a , _a , unittest.TestCase ): """simple docstring""" lowercase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowercase_ = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ 
= False def snake_case ( self : List[Any] ): lowercase__ : Any = TFEfficientFormerModelTester(self ) lowercase__ : Optional[Any] = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def snake_case ( self : str ): self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def snake_case ( self : int ): pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def snake_case ( self : Optional[Any] ): pass def snake_case ( self : Optional[Any] ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Dict = model_class(__UpperCAmelCase ) lowercase__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : List[str] = [*signature.parameters.keys()] lowercase__ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def snake_case ( self : str ): def check_hidden_states_output(SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase__ : int = model_class(__UpperCAmelCase ) lowercase__ : Optional[int] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase ) lowercase__ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Optional[Any] = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) if hasattr(self.model_tester , "encoder_seq_length" ): lowercase__ : Optional[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1: lowercase__ : Any = seq_length * self.model_tester.chunk_length else: lowercase__ 
: str = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: lowercase__ : Union[str, Any] = outputs.decoder_hidden_states self.asseretIsInstance(__UpperCAmelCase , (list, tuple) ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) lowercase__ : int = getattr(self.model_tester , "seq_length" , __UpperCAmelCase ) lowercase__ : Any = getattr(self.model_tester , "decoder_seq_length" , __UpperCAmelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Optional[Any] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=False ): lowercase__ : str = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def snake_case ( self : str ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def snake_case ( self : Union[str, Any] ): lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def snake_case ( self : str ): lowercase__ : Any = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def snake_case ( self : int ): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Optional[int] = TFEfficientFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def snake_case ( self : Union[str, Any] ): lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict = True lowercase__ : Dict = getattr(self.model_tester , "seq_length" , __UpperCAmelCase ) lowercase__ : List[str] = getattr(self.model_tester , "encoder_seq_length" , __UpperCAmelCase ) lowercase__ : List[Any] = getattr(self.model_tester , "key_length" , __UpperCAmelCase ) lowercase__ : List[str] = getattr(self.model_tester , "chunk_length" , __UpperCAmelCase ) if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ): lowercase__ : Tuple = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : Dict = False lowercase__ : Union[str, Any] = True lowercase__ : Any = model_class(__UpperCAmelCase ) lowercase__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase ) lowercase__ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : Dict = model_class(__UpperCAmelCase ) lowercase__ : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase ) lowercase__ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__UpperCAmelCase ) , 
self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def snake_case ( self : Optional[int] ): # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model lowercase__ : Any = model_class(__UpperCAmelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes lowercase__ : str = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__UpperCAmelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } lowercase__ : Optional[int] = model(__UpperCAmelCase ) self.assertTrue(outputs_dict is not None ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : Tuple ): return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def snake_case ( self : Optional[int] ): lowercase__ : Any = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) lowercase__ : Optional[int] = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Optional[Any] = image_processor(images=__UpperCAmelCase , return_tensors="tf" ) # forward 
pass lowercase__ : Tuple = model(**__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits lowercase__ : str = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) lowercase__ : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def snake_case ( self : Optional[Any] ): lowercase__ : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) lowercase__ : int = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : str = image_processor(images=__UpperCAmelCase , return_tensors="tf" ) # forward pass lowercase__ : Union[str, Any] = model(**__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits lowercase__ : str = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) lowercase__ : Any = tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
130
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
"""simple docstring""" snake_case_ = { """joule""": 1.0, """kilojoule""": 1000, """megajoule""": 100_0000, """gigajoule""": 10_0000_0000, """wattsecond""": 1.0, """watthour""": 3600, """kilowatthour""": 360_0000, """newtonmeter""": 1.0, """calorie_nutr""": 4186.8, """kilocalorie_nutr""": 4186800.00, """electronvolt""": 1.602176634e-19, """britishthermalunit_it""": 1055.05585, """footpound""": 1.355818, } def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: UpperCAmelCase = ( F"""Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n""" F"""Valid values are: {', '.join(A_ )}""" ) raise ValueError(A_ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
78
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() 
model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, 
-8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): A : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right A : List[Any] = 1_2_8_0_2_2 A : Any = 1_2_8_0_2_8 @require_sentencepiece class _UpperCamelCase ( _a ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] =MaMaaaTokenizer __UpperCAmelCase : List[str] =False __UpperCAmelCase : Tuple =False __UpperCAmelCase : str =True def snake_case ( self ): super().setUp() __lowerCAmelCase = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] __lowerCAmelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __lowerCAmelCase = Path(self.tmpdirname ) save_json(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] ) __lowerCAmelCase = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self , **__a ): return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def snake_case ( self , __a ): return ( "This is a test", "This is a test", ) def snake_case ( self ): __lowerCAmelCase = "</s>" __lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(__UpperCAmelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." ) def snake_case ( self ): pass def snake_case ( self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer.tokenize("This is a test" ) self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2, 3, 4, 5, 6] , ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) __lowerCAmelCase = tokenizer.convert_tokens_to_string(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , "This is a test" ) @slow def snake_case ( self ): # fmt: off __lowerCAmelCase = {"input_ids": [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 
12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="facebook/m2m100_418M" , 
revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] ="""facebook/m2m100_418M""" __UpperCAmelCase : Union[str, Any] =[ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] __UpperCAmelCase : Dict =[ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off __UpperCAmelCase : List[str] =[EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def snake_case ( cls ): __lowerCAmelCase = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) __lowerCAmelCase = 1 return cls def snake_case ( self ): self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_80_06 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_80_22 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_80_76 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_80_63 ) def snake_case ( self ): __lowerCAmelCase = self.tokenizer.get_vocab() self.assertEqual(len(__UpperCAmelCase ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , __UpperCAmelCase ) def snake_case ( self ): __lowerCAmelCase = "en" __lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase ) def snake_case ( self ): self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids ) # fmt: off __lowerCAmelCase = [FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2] # fmt: on 
__lowerCAmelCase = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase ) def snake_case ( self ): __lowerCAmelCase = tempfile.mkdtemp() __lowerCAmelCase = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(__UpperCAmelCase ) __lowerCAmelCase = MaMaaaTokenizer.from_pretrained(__UpperCAmelCase ) self.assertDictEqual(new_tok.lang_token_to_id , __UpperCAmelCase ) @require_torch def snake_case ( self ): __lowerCAmelCase = "en" __lowerCAmelCase = "fr" __lowerCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt" ) __lowerCAmelCase = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: __lowerCAmelCase = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def snake_case ( self ): __lowerCAmelCase = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) __lowerCAmelCase = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def snake_case ( self ): __lowerCAmelCase = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] 
) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) __lowerCAmelCase = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def snake_case ( self ): __lowerCAmelCase = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { # en_XX, A, test, EOS "input_ids": [[12_80_22, 58, 41_83, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 12_80_06, } , )
57
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


# NOTE(review): this class appears machine-obfuscated. Every method is named
# SCREAMING_SNAKE_CASE, so each definition shadows the previous one and only the
# last method actually exists on the class; the base `_a` is not defined in this
# file (presumably it was `TestCase`, which is imported above — verify); and
# names such as `__UpperCAmelCase`, `vocab_tokens`, `config`, `retriever` are
# read without ever being bound. Verify each method against the upstream
# RealmRetriever test file before relying on any of them.
class _snake_case ( _a ):

    # setUp-like fixture: creates a temp dir holding a wordpiece vocab file
    # ("realm_tokenizer") and an empty block-records dir ("realm_block_records").
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Any = tempfile.mkdtemp()
        # presumably the number of dummy block records (5) — TODO confirm
        __magic_name__ : str = 5
        # Realm tok
        __magic_name__ : Dict = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        __magic_name__ : Tuple = os.path.join(self.tmpdirname , "realm_tokenizer" )
        os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
        __magic_name__ : Optional[Any] = os.path.join(__UpperCAmelCase , VOCAB_FILES_NAMES["vocab_file"] )
        # One token per line — the on-disk format RealmTokenizer expects.
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __magic_name__ : Dict = os.path.join(self.tmpdirname , "realm_block_records" )
        os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )

    # Returns a RealmTokenizer loaded from the fixture vocab written above.
    def SCREAMING_SNAKE_CASE ( self ):
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )

    # tearDown-like cleanup: removes the whole fixture temp dir.
    def SCREAMING_SNAKE_CASE ( self ):
        shutil.rmtree(self.tmpdirname )

    # Builds a RealmConfig sized to the dummy block-record count.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Tuple = RealmConfig(num_block_records=self.num_block_records )
        return config

    # Tiny QA dataset with two (id, question, answers) rows.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Dict = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    # Six dummy evidence blocks as a numpy array of byte strings.
    # dtype comes from an unbound placeholder — presumably `object`; verify.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : List[Any] = np.array(
            [
                B"This is the first record",
                B"This is the second record",
                B"This is the third record",
                B"This is the fourth record",
                B"This is the fifth record",
                B"This is a longer longer longer record",
            ] ,
            dtype=__UpperCAmelCase ,
        )
        return block_records

    # Assembles a RealmRetriever over the dummy blocks and fixture tokenizer.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Union[str, Any] = RealmRetriever(
            block_records=self.get_dummy_block_records() ,
            tokenizer=self.get_tokenizer() ,
        )
        return retriever

    # Retrieval test: retrieves blocks 0 and 3 for one question, then checks
    # the shape and token content of the concatenated question+evidence inputs.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Union[str, Any] = self.get_config()
        __magic_name__ : List[str] = self.get_dummy_retriever()
        __magic_name__ : Optional[Any] = retriever.tokenizer

        __magic_name__ : int = np.array([0, 3] , dtype="long" )
        __magic_name__ : List[str] = tokenizer(["Test question"] ).input_ids
        __magic_name__ : Union[str, Any] = tokenizer(
            ["the fourth"] ,
            add_special_tokens=__UpperCAmelCase ,
            return_token_type_ids=__UpperCAmelCase ,
            return_attention_mask=__UpperCAmelCase ,
        ).input_ids
        __magic_name__ : List[str] = config.reader_seq_len

        __magic_name__ : Optional[Any] = retriever(
            __UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="np" )
        self.assertEqual(len(__UpperCAmelCase ) , 2 )
        self.assertEqual(len(__UpperCAmelCase ) , 2 )
        self.assertEqual(len(__UpperCAmelCase ) , 2 )
        # 2 retrieved blocks x max sequence length 10.
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] ,
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] ,
        )

    # Answer-span test: retrieves blocks 0, 3 and 5 and checks which blocks
    # contain an answer plus the expected start/end token positions.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Union[str, Any] = self.get_config()
        __magic_name__ : List[str] = self.get_dummy_retriever()
        __magic_name__ : str = retriever.tokenizer

        __magic_name__ : Any = np.array([0, 3, 5] , dtype="long" )
        __magic_name__ : Dict = tokenizer(["Test question"] ).input_ids
        __magic_name__ : List[Any] = tokenizer(
            ["the fourth", "longer longer"] ,
            add_special_tokens=__UpperCAmelCase ,
            return_token_type_ids=__UpperCAmelCase ,
            return_attention_mask=__UpperCAmelCase ,
        ).input_ids
        __magic_name__ : List[Any] = config.reader_seq_len

        __magic_name__ : Tuple = retriever(
            __UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="np" )
        self.assertEqual([False, True, True] , __UpperCAmelCase )
        # -1 marks "no answer span in this block".
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __UpperCAmelCase )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __UpperCAmelCase )

    # Round-trip test: save_pretrained / from_pretrained from a local path, plus
    # a mocked hf_hub_download so the "remote" load resolves to the local file.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : str = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )

        # Test local path
        __magic_name__ : Any = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] , B"This is the first record" )

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            # presumably this was `mock_hf_hub_download.return_value = ...` — verify upstream
            __magic_name__ : Dict = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            __magic_name__ : Any = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )

        self.assertEqual(retriever.block_records[0] , B"This is the first record" )
281
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a :str = logging.get_logger(__name__)

# NOTE(review): this second assignment reuses the name `__a`, clobbering the
# logger above. Kept as-is for backward compatibility with any importer of
# `__a`, which sees this archive map as its final value.
__a :Any = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}


class _a ( PretrainedConfig ):
    """Configuration for DPR encoder/reader models (BERT-style hyperparameters).

    Fixes applied: the class previously inherited from itself (`class _a(_a)`,
    a NameError), every __init__ parameter shared the name `UpperCAmelCase`
    (a duplicate-argument SyntaxError), and all values were assigned to a
    single local instead of being stored on `self`.
    """

    # Model-type identifier used by the Auto* config machinery.
    _lowerCamelCase : int = """dpr"""

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """Store the encoder hyperparameters; unknown kwargs go to the base config.

        Args mirror the standard BERT config, plus:
            projection_dim: size of the optional output projection
                (0 disables the projection layer).
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
312
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("String must only contain alphabetic characters." ) a : Tuple = sorted(string.lower() ) return len(A_ ) == len(set(A_ ) ) if __name__ == "__main__": __lowercase = input("""Enter a string """).strip() __lowercase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
0
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)] ) def __UpperCamelCase ( self : List[Any] , a : List[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__UpperCAmelCase , config_name=__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Tuple = GenerationConfig.from_pretrained(__UpperCAmelCase , config_name=__UpperCAmelCase ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __UpperCAmelCase ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , __UpperCAmelCase ) def __UpperCamelCase ( self : int ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained("gpt2" ) SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig.from_model_config(__UpperCAmelCase ) SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) 
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def __UpperCamelCase ( self : Optional[int] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig() SCREAMING_SNAKE_CASE : str = { "max_new_tokens": 1024, "foo": "bar", } SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Tuple = generation_config.update(**__UpperCAmelCase ) # update_kwargs was not modified (no side effects) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__UpperCAmelCase , {"foo": "bar"} ) def __UpperCamelCase ( self : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig() SCREAMING_SNAKE_CASE : Optional[int] = "bar" with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir: generation_config.save_pretrained(__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig.from_pretrained(__UpperCAmelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar" ) SCREAMING_SNAKE_CASE : List[str] = GenerationConfig.from_model_config(__UpperCAmelCase ) assert not hasattr(__UpperCAmelCase , "foo" ) # no new kwargs should be initialized if from config def __UpperCamelCase ( self : List[str] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , __UpperCAmelCase ) self.assertEqual(default_config.num_beams , 1 ) SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , 
__UpperCAmelCase ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig.from_pretrained(__UpperCAmelCase , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , __UpperCAmelCase ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __UpperCamelCase ( cls : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __UpperCamelCase ( cls : Dict ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id="test-generation-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" ) except HTTPError: pass def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __UpperCAmelCase , repo_id="test-generation-config" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : Any = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , 
getattr(__UpperCAmelCase , __UpperCAmelCase ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = GenerationConfig( do_sample=__UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : str = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __UpperCAmelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
76
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def 
__snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''') raise
40
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCamelCase : Dict = False class __UpperCAmelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): def __magic_name__ ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Any ): UpperCAmelCase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''', torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''', image=__UpperCAmelCase, text_to_image_strength=0.7_5, generator=__UpperCAmelCase, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''', ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__UpperCAmelCase ) UpperCAmelCase : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase, torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase : int = generator.manual_seed(0 ) UpperCAmelCase : Any = pipe.dual_guided( prompt='''first prompt''', image=__UpperCAmelCase, text_to_image_strength=0.7_5, generator=__UpperCAmelCase, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''', ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __magic_name__ ( self : List[str] ): UpperCAmelCase : str = 
VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''', torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase : Any = "cyberpunk 2077" UpperCAmelCase : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase : List[str] = torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = pipe.dual_guided( prompt=__UpperCAmelCase, image=__UpperCAmelCase, text_to_image_strength=0.7_5, generator=__UpperCAmelCase, guidance_scale=7.5, num_inference_steps=5_0, output_type='''numpy''', ).images UpperCAmelCase : Any = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase : str = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger " UpperCAmelCase : Any = torch.manual_seed(0 ) UpperCAmelCase : Tuple = pipe.text_to_image( prompt=__UpperCAmelCase, generator=__UpperCAmelCase, guidance_scale=7.5, num_inference_steps=5_0, output_type='''numpy''' ).images UpperCAmelCase : List[Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase : str = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase : Dict = pipe.image_variation(__UpperCAmelCase, generator=__UpperCAmelCase, output_type='''numpy''' ).images UpperCAmelCase : Dict = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase : Any = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-1
336
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a 
: Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowercase__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCamelCase ( _a ): '''simple docstring''' a_ : Dict = ["""pixel_values"""] def __init__( self : List[str] , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : bool = True , a_ : Dict[str, int] = None , a_ : bool = True , a_ : Union[int, float] = 1 / 2_55 , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = True , **a_ : Optional[Any] , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase_ : Dict = size if size is not None else {"shortest_edge": 2_24} lowerCAmelCase_ : Dict = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) lowerCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} lowerCAmelCase_ : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name="crop_size" ) lowerCAmelCase_ : int = do_resize lowerCAmelCase_ : List[Any] = size lowerCAmelCase_ : Dict = resample lowerCAmelCase_ : List[str] = do_center_crop lowerCAmelCase_ : Any = crop_size lowerCAmelCase_ : Dict = do_rescale lowerCAmelCase_ : Optional[int] = rescale_factor lowerCAmelCase_ : Tuple = do_normalize lowerCAmelCase_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase_ : Dict = image_std 
if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase_ : Union[str, Any] = do_convert_rgb def lowerCamelCase ( self : List[Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , ): lowerCAmelCase_ : Optional[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowerCAmelCase_ : Any = get_resize_output_image_size(__UpperCAmelCase , size=size["shortest_edge"] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self : Tuple , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Union[str, Any] , ): lowerCAmelCase_ : Tuple = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self : int , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , ): return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self : Union[str, Any] , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[str] , ): return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self : Union[str, Any] , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : int = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **a_ : Union[str, Any] , ): lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Union[str, Any] = size if size is not None else self.size lowerCAmelCase_ : Any = get_size_dict(__UpperCAmelCase , param_name="size" , default_to_square=__UpperCAmelCase ) lowerCAmelCase_ : Dict = resample if resample is not None else self.resample lowerCAmelCase_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ : int = get_size_dict(__UpperCAmelCase , param_name="crop_size" , default_to_square=__UpperCAmelCase ) lowerCAmelCase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale 
lowerCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : Union[str, Any] = image_std if image_std is not None else self.image_std lowerCAmelCase_ : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase_ : Any = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase_ : Union[str, Any] = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
lowerCAmelCase_ : List[str] = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: lowerCAmelCase_ : int = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: lowerCAmelCase_ : List[str] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: lowerCAmelCase_ : List[str] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: lowerCAmelCase_ : List[str] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] lowerCAmelCase_ : int = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] lowerCAmelCase_ : List[Any] = {"pixel_values": images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
241
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
0
from functools import lru_cache @lru_cache def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> int: if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
325
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
0
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' while second != 0: __UpperCamelCase :List[str] = first & second first ^= second __UpperCamelCase :Union[str, Any] = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() __lowercase = int(input('''Enter the first number: ''').strip()) __lowercase = int(input('''Enter the second number: ''').strip()) print(F'{add(first, second) = }')
43
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
0
"""simple docstring""" from typing import List import numpy as np def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = {key: len(A_ ) for key, value in gen_kwargs.items() if isinstance(A_ , A_ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCAmelCase : str = max(lists_lengths.values() , default=0 ) return max(1 , A_ ) def _snake_case ( _snake_case : int , _snake_case : Tuple ): lowerCAmelCase : Tuple = [] for group_idx in range(A_ ): lowerCAmelCase : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCAmelCase : str = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCAmelCase : Tuple = range(A_ , start + num_shards_to_add ) shards_indices_per_group.append(A_ ) return shards_indices_per_group def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ): lowerCAmelCase : Tuple = _number_of_shards_in_gen_kwargs(A_ ) if num_shards == 1: return [dict(A_ )] else: lowerCAmelCase : Tuple = _distribute_shards(num_shards=A_ , max_num_jobs=A_ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(A_ , A_ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(A_ ) ) ] def _snake_case ( _snake_case : Optional[Any] ): return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , A_ ) else 
gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ): lowerCAmelCase : Union[str, Any] = {len(A_ ) for value in gen_kwargs.values() if isinstance(A_ , A_ )} lowerCAmelCase : Optional[int] = {} for size in list_sizes: lowerCAmelCase : Optional[Any] = list(range(A_ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCAmelCase : Union[str, Any] = dict(A_ ) for key, value in shuffled_kwargs.items(): if isinstance(A_ , A_ ): lowerCAmelCase : int = [value[i] for i in indices_per_size[len(A_ )]] return shuffled_kwargs
60
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : List[str] = len(A_ ) lowercase__ : int = [[0] * n for i in range(A_ )] for i in range(A_ ): lowercase__ : Dict = y_points[i] for i in range(2 , A_ ): for j in range(A_ , A_ ): lowercase__ : Union[str, Any] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
130
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
"""simple docstring""" def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = [False] * len(A_ ) UpperCAmelCase = [] queue.append(A_ ) UpperCAmelCase = True while queue: UpperCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(A_ ) UpperCAmelCase = True UpperCAmelCase = u return visited[t] def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = [-1] * (len(A_ )) UpperCAmelCase = 0 while bfs(A_ , A_ , A_ , A_ ): UpperCAmelCase = float('Inf' ) UpperCAmelCase = sink while s != source: # Find the minimum value in select path UpperCAmelCase = min(A_ , graph[parent[s]][s] ) UpperCAmelCase = parent[s] max_flow += path_flow UpperCAmelCase = sink while v != source: UpperCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase = parent[v] return max_flow snake_case_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] snake_case_ , snake_case_ = 0, 5 print(ford_fulkerson(graph, source, sink))
78
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
0
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _lowerCamelCase ( _UpperCamelCase=32 , _UpperCamelCase=10 , _UpperCamelCase=100 , _UpperCamelCase=1026 , _UpperCamelCase=True , _UpperCamelCase="data/tokenized_stories_train_wikitext103.jbl" , _UpperCamelCase="igf_context_pairs.jbl" , ): '''simple docstring''' set_seed(3 ) # generate train_data and objective_set __lowerCAmelCase = generate_datasets( A_ , A_ , number=A_ , min_len=1026 , trim=A_ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __lowerCAmelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model __lowerCAmelCase = load_gpta("gpt2" ).to(A_ ) print("computing perplexity on objective set" ) __lowerCAmelCase = compute_perplexity(A_ , A_ , A_ ).item() print("perplexity on objective set:" , A_ ) # collect igf pairs and save to file demo.jbl collect_objective_set(A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=15 , _UpperCamelCase=128 , _UpperCamelCase=100 , _UpperCamelCase="igf_model.pt" , ): '''simple docstring''' set_seed(42 ) # Load pre-trained model __lowerCAmelCase = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model __lowerCAmelCase = SecondaryLearner(A_ ) # Train secondary learner __lowerCAmelCase = train_secondary_learner( A_ , A_ , max_epochs=A_ , batch_size=A_ , eval_freq=100 , igf_model_path=A_ , ) del model, 
secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=32 , _UpperCamelCase=1000 , _UpperCamelCase=16 , _UpperCamelCase=1.0 , _UpperCamelCase=recopy_gpta , _UpperCamelCase=None , _UpperCamelCase=10 , _UpperCamelCase="gpt2_finetuned.pt" , ): '''simple docstring''' __lowerCAmelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) __lowerCAmelCase = RandomSampler(A_ ) __lowerCAmelCase = DataLoader(A_ , sampler=A_ ) __lowerCAmelCase = max_steps // (len(A_ )) + 1 __lowerCAmelCase = 0 __lowerCAmelCase = torch.zeros((1, context_len) , dtype=torch.long , device=A_ ) __lowerCAmelCase = recopy_model(A_ , A_ , A_ ) model.train() if secondary_learner is not None: secondary_learner.to(A_ ) secondary_learner.eval() __lowerCAmelCase = [] __lowerCAmelCase = 0 __lowerCAmelCase = [] __lowerCAmelCase = [] # Compute the performance of the transformer model at the beginning __lowerCAmelCase = compute_perplexity(A_ , A_ , A_ ) test_perps.append(A_ ) print("Test perplexity, step" , A_ , ":" , A_ ) for epoch in range(int(A_ ) ): for step, example in enumerate(A_ ): torch.cuda.empty_cache() __lowerCAmelCase = random.randint(0 , example.size(2 ) - context_len - 1 ) __lowerCAmelCase = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __lowerCAmelCase = model(A_ , labels=A_ ) __lowerCAmelCase = True if secondary_learner is not None: __lowerCAmelCase = secondary_learner.forward( torch.tensor(A_ , dtype=torch.long , device=A_ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(A_ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. 
if global_step == 10: __lowerCAmelCase = -1 if predicted_q < threshold: __lowerCAmelCase = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __lowerCAmelCase = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __lowerCAmelCase = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __lowerCAmelCase = compute_perplexity(A_ , A_ , A_ ) test_perps.append(A_ ) print("Test perplexity, step" , A_ , ":" , A_ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , A_ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=A_ , default=A_ , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." 
) , ) parser.add_argument( "--igf_data_file" , type=A_ , default=A_ , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=A_ , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=A_ , default=A_ , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=A_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=100 , type=A_ , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=100 , type=A_ , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1000 , type=A_ , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=128 , type=A_ , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=A_ , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=A_ , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=100 , type=A_ , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1026 , type=A_ , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=A_ , help="number of epochs to 
train secondary learner" ) parser.add_argument("--trim" , default=A_ , type=A_ , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=A_ , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=A_ , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=A_ , type=A_ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=A_ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner __lowerCAmelCase = joblib.load("data/IGF_values.jbl" ) # Train secondary learner __lowerCAmelCase = training_secondary_learner( A_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model __lowerCAmelCase = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __lowerCAmelCase = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=A_ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( A_ , A_ , A_ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=A_ , secondary_learner=A_ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
57
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
0
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging snake_case : Optional[int] = logging.get_logger(__name__) class _snake_case ( _a ): UpperCamelCase__ = ["""input_features"""] def __init__( self , _a=80 , _a=16_000 , _a=160 , _a=30 , _a=400 , _a=0.0 , _a=False , **_a , ): super().__init__( feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) __magic_name__ : Optional[int] = n_fft __magic_name__ : Union[str, Any] = hop_length __magic_name__ : Optional[int] = chunk_length __magic_name__ : str = chunk_length * sampling_rate __magic_name__ : List[str] = self.n_samples // hop_length __magic_name__ : Dict = sampling_rate __magic_name__ : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__UpperCAmelCase , norm="slaney" , mel_scale="slaney" , ) def SCREAMING_SNAKE_CASE ( self , _a ): __magic_name__ : List[str] = spectrogram( __UpperCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) __magic_name__ : Union[str, Any] = log_spec[:, :-1] __magic_name__ : List[str] = np.maximum(__UpperCAmelCase , log_spec.max() - 8.0 ) __magic_name__ : Dict = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def SCREAMING_SNAKE_CASE ( _a , _a , _a = 0.0 ): if attention_mask is not None: __magic_name__ : str = np.array(__UpperCAmelCase , np.intaa ) __magic_name__ : Dict = [] 
for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ): __magic_name__ : Any = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: __magic_name__ : Any = padding_value normed_input_values.append(__UpperCAmelCase ) else: __magic_name__ : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self , _a , _a = True , _a = None , _a = None , _a = None , _a = "max_length" , _a = None , _a = None , _a = None , **_a , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) __magic_name__ : Tuple = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __magic_name__ : str = is_batched_numpy or ( isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __magic_name__ : Optional[int] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ): __magic_name__ : str = np.asarray(__UpperCAmelCase , dtype=np.floataa ) elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __magic_name__ : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __magic_name__ : Union[str, Any] = [np.asarray([raw_speech] ).T] __magic_name__ : Any = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding __magic_name__ : List[str] = self.pad( __UpperCAmelCase , padding=__UpperCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: __magic_name__ : int = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) __magic_name__ : int = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format __magic_name__ : Optional[Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) __magic_name__ : Optional[int] = [self._np_extract_fbank_features(__UpperCAmelCase ) for waveform in input_features[0]] if isinstance(input_features[0] , __UpperCAmelCase ): __magic_name__ : Union[str, Any] = [np.asarray(__UpperCAmelCase , dtype=np.floataa 
) for feature in input_features] else: __magic_name__ : Union[str, Any] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) __magic_name__ : Union[str, Any] = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: __magic_name__ : int = padded_inputs.convert_to_tensors(__UpperCAmelCase ) return padded_inputs def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Tuple = copy.deepcopy(self.__dict__ ) __magic_name__ : Optional[int] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
281
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" if digit_amount > 0: return round(number - int(A_ ) ,A_ ) return number - int(A_ ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
312
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
0
def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : int = False while is_sorted is False: # Until all the indices are traversed keep looping SCREAMING_SNAKE_CASE : Optional[Any] = True for i in range(0 , len(A_) - 1 , 2): # iterating over all even indices if input_list[i] > input_list[i + 1]: SCREAMING_SNAKE_CASE : Any = input_list[i + 1], input_list[i] # swapping if elements not in order SCREAMING_SNAKE_CASE : Optional[Any] = False for i in range(1 , len(A_) - 1 , 2): # iterating over all odd indices if input_list[i] > input_list[i + 1]: SCREAMING_SNAKE_CASE : Optional[int] = input_list[i + 1], input_list[i] # swapping if elements not in order SCREAMING_SNAKE_CASE : str = False return input_list if __name__ == "__main__": print('Enter list to be sorted') a_ = [int(x) for x in input().split()] # inputing elements of the list in one line a_ = odd_even_sort(input_list) print('The sorted list is') print(sorted_list)
76
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
0
import torch

from diffusers import StableDiffusionPipeline

# Path to the fine-tuned (e.g. DreamBooth) checkpoint directory.
model_id = "path-to-your-trained-model"
# fp16 weights halve GPU memory; requires a CUDA device.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
# 50 denoising steps with classifier-free guidance scale 7.5 (standard defaults).
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
336
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
0
"""simple docstring""" import random def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> tuple: """simple docstring""" lowerCAmelCase_ : List[str] = [], [], [] for element in data: if element < pivot: less.append(A_ ) elif element > pivot: greater.append(A_ ) else: equal.append(A_ ) return less, equal, greater def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Dict: """simple docstring""" if index >= len(A_ ) or index < 0: return None lowerCAmelCase_ : Optional[Any] = items[random.randint(0 , len(A_ ) - 1 )] lowerCAmelCase_ : int = 0 lowerCAmelCase_ : Any = _partition(A_ , A_ ) lowerCAmelCase_ : Union[str, Any] = len(A_ ) lowerCAmelCase_ : Any = len(A_ ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(A_ , A_ ) # must be in larger else: return quick_select(A_ , index - (m + count) )
241
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
"""Unit tests for the 0/1 knapsack implementation in `knapsack.py`."""
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        """With zero capacity (or a zero-value item) the achievable profit is 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        # still zero capacity -> nothing fits
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Capacity 3 fits items of weight 2 and 1 for a combined value of 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic example: capacity 50 -> take items worth 100 and 120."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
325
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
0
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with a 1D denoising U-Net."""

    def __init__(self, unet, scheduler):
        super().__init__()
        # register_modules exposes the sub-models as self.unet / self.scheduler and
        # lets save_pretrained / from_pretrained round-trip them
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate `batch_size` audio clips of roughly `audio_length_in_s` seconds.

        Returns an `AudioPipelineOutput` with `.audios` of shape
        (batch, channels, samples), or a 1-tuple when `return_dict=False`.
        """
        if audio_length_in_s is None:
            # default: the clip length the U-Net was trained on
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # each up-block doubles the temporal resolution, so the sample length
        # must be a multiple of 2 ** num_up_blocks
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # round up to the next valid multiple; the padding is trimmed after denoising
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # drop the padding added above to satisfy the down-scale factor
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
43
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
0
"""simple docstring""" from bisect import bisect from itertools import accumulate def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ): lowerCAmelCase : Any = sorted(zip(A_ , A_ ) , key=lambda _snake_case : x[0] / x[1] , reverse=A_ ) lowerCAmelCase : int = [i[0] for i in r], [i[1] for i in r] lowerCAmelCase : Union[str, Any] = list(accumulate(A_ ) ) lowerCAmelCase : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
60
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
0
"""Stale bot for the diffusers issue tracker: nudges, then closes, inactive issues."""
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    """Scan open issues and apply the stale policy (comment after 23 days, close after 7 more)."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # most recent comment first
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # get_labels() yields Label objects, so compare on label.name, not the object
            any(label.name.lower() == "stale" for label in issue.get_labels())
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
130
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
0
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class A_ ( unittest.TestCase ): """simple docstring""" __UpperCamelCase = MODEL_FOR_MASKED_LM_MAPPING __UpperCamelCase = TF_MODEL_FOR_MASKED_LM_MAPPING def UpperCAmelCase__ ( self :Tuple ) -> Dict: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def UpperCAmelCase__ ( self :Optional[int] ) -> str: UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) UpperCAmelCase = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_80_15, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_55_06, 'token_str': ' accuser'}, ] , ) UpperCAmelCase = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1E-05, 'token': 3_80_15, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1E-05, 'token': 2_55_06, 'token_str': ' accuser', }, ] , ) UpperCAmelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 34_99, 'token_str': 
' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 29_41, 'token_str': ' Te'}, ] , ) @require_torch def UpperCAmelCase__ ( self :Optional[int] ) -> int: UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) UpperCAmelCase = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'}, ] , ) UpperCAmelCase = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'}, ] , ) UpperCAmelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 34_99, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2E-05, 'token': 29_41, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'}, ] , ) UpperCAmelCase = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def 
UpperCAmelCase__ ( self :int ) -> Optional[Any]: UpperCAmelCase = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() UpperCAmelCase = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def UpperCAmelCase__ ( self :Dict ) -> str: UpperCAmelCase = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def UpperCAmelCase__ ( self :Optional[int] ) -> Any: UpperCAmelCase = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(__UpperCAmelCase ) def UpperCAmelCase__ ( self :int , lowercase_ :Optional[int] ) -> Dict: UpperCAmelCase = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'sequence': 'My name is John', 'score': 0.008, 'token': 6_10, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.007, 'token': 15_73, 'token_str': ' Chris'}, ] , ) UpperCAmelCase = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.251, 'token': 22_01, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.214, 'token': 1_27_90, 'token_str': ' Lyon', }, ] , ) UpperCAmelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'sequence': 'My name is Patrick', 'score': 0.005, 'token': 34_99, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_36_06, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.000, 
'token': 29_41, 'token_str': ' Te'}, ] , ) @require_torch def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]: UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) UpperCAmelCase = None UpperCAmelCase = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def UpperCAmelCase__ ( self :Tuple ) -> List[str]: UpperCAmelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) UpperCAmelCase = None UpperCAmelCase = None self.run_pipeline_test(__UpperCAmelCase , [] ) def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple , lowercase_ :Tuple , lowercase_ :Optional[Any] ) -> str: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) UpperCAmelCase = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Tuple , lowercase_ :Optional[Any] ) -> List[Any]: UpperCAmelCase = fill_masker.tokenizer UpperCAmelCase = fill_masker.model UpperCAmelCase = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( __UpperCAmelCase , [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 
'token_str': ANY(__UpperCAmelCase )}, ] , ) UpperCAmelCase = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( __UpperCAmelCase , [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ] , ) UpperCAmelCase = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( __UpperCAmelCase , [ [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ], [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': 
ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker('This is' ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str , lowercase_ :int ) -> Tuple: UpperCAmelCase = tokenizer.get_vocab() UpperCAmelCase = sorted(vocab.keys() )[:2] # Pipeline argument UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __UpperCAmelCase , [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ] , ) UpperCAmelCase = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , __UpperCAmelCase ) UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument UpperCAmelCase = 
FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ] , ) UpperCAmelCase = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , __UpperCAmelCase ) UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__UpperCAmelCase ) UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs] UpperCAmelCase = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__UpperCAmelCase ) UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(__UpperCAmelCase ): UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' ) def UpperCAmelCase__ ( self :int , lowercase_ :Optional[Any] , lowercase_ :str ) -> Optional[int]: UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __UpperCAmelCase , [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ] , ) UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , 
nested_simplify(__UpperCAmelCase ) ) def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[int] , lowercase_ :str ) -> Any: UpperCAmelCase = tokenizer.get_vocab() UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 UpperCAmelCase = sorted(vocab.keys() )[:3] UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results UpperCAmelCase = [el["token_str"] for el in sorted(__UpperCAmelCase , key=lambda lowercase_ : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def UpperCAmelCase__ ( self :int , lowercase_ :Any , lowercase_ :Any ) -> List[str]: UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) UpperCAmelCase = tokenizer.get_vocab() # String duplicates + id duplicates UpperCAmelCase = sorted(vocab.keys() )[:3] UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]] UpperCAmelCase = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] ) -> Tuple: UpperCAmelCase = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) UpperCAmelCase = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) 
self.assertEqual( __UpperCAmelCase , [ [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ], [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ], [ {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, {'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )}, ], ] , )
78
"""Convert Audio Spectrogram Transformer (AST) checkpoints from the original
repository (https://github.com/YuanGongND/ast) to the 🤗 Transformers format.

Reconstructed from a name-mangled dump: the four helpers were all named
``lowercase`` (shadowing each other) while the call sites referenced the
names restored below.
"""

import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """Build an ``ASTConfig`` (patch strides + label maps) from the checkpoint name."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # default strides
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        # NOTE(review): stride attribute names restored from the upstream
        # conversion script — the dump lost the left-hand sides; confirm.
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map one original AST state-dict key to its Transformers counterpart."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        # Must run before the generic "attn" rule below.
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename every key in place, splitting fused qkv projections into q/k/v."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # NOTE(review): the dump lost the target key names; restored from
            # the upstream conversion script — confirm against the AST model.
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop the original distillation/classification heads that have no HF counterpart."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original AST checkpoint, convert it, verify logits on a
    sample input, and optionally save to disk and/or push to the hub."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1_024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
0
"""simple docstring""" class _UpperCamelCase : '''simple docstring''' def __init__( self , __a ): __lowerCAmelCase = size __lowerCAmelCase = [0] * size __lowerCAmelCase = [0] * size @staticmethod def snake_case ( __a ): return index | (index + 1) @staticmethod def snake_case ( __a ): return (index & (index + 1)) - 1 def snake_case ( self , __a , __a ): __lowerCAmelCase = value while index < self.size: __lowerCAmelCase = self.get_prev(__UpperCAmelCase ) + 1 if current_left_border == index: __lowerCAmelCase = value else: __lowerCAmelCase = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __lowerCAmelCase = self.get_next(__UpperCAmelCase ) def snake_case ( self , __a , __a ): right -= 1 # Because of right is exclusive __lowerCAmelCase = 0 while left <= right: __lowerCAmelCase = self.get_prev(__UpperCAmelCase ) if left <= current_left: __lowerCAmelCase = max(__UpperCAmelCase , self.tree[right] ) __lowerCAmelCase = current_left else: __lowerCAmelCase = max(__UpperCAmelCase , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
57
"""Lazy import structure for the RAG model: configuration, retriever,
tokenizer, and the optional PyTorch / TensorFlow model classes."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Sub-modules that are importable regardless of which frameworks are installed.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

# PyTorch model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

# TensorFlow model classes are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy sub-modules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
import doctest from collections import deque import numpy as np class _snake_case : def __init__( self ): __magic_name__ : Dict = [2, 1, 2, -1] __magic_name__ : Dict = [1, 2, 3, 4] def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = len(self.first_signal ) __magic_name__ : int = len(self.second_signal ) __magic_name__ : Tuple = max(__UpperCAmelCase , __UpperCAmelCase ) # create a zero matrix of max_length x max_length __magic_name__ : Any = [[0] * max_length for i in range(__UpperCAmelCase )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(__UpperCAmelCase ): __magic_name__ : str = deque(self.second_signal ) rotated_signal.rotate(__UpperCAmelCase ) for j, item in enumerate(__UpperCAmelCase ): matrix[i][j] += item # multiply the matrix with the first signal __magic_name__ : Any = np.matmul(np.transpose(__UpperCAmelCase ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(__UpperCAmelCase , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
281
"""Fast (tiny-model CPU) and slow (GPU integration) tests for the
Stable Diffusion 2 inpainting pipeline.

Reconstructed from a name-mangled dump: BOTH test classes were named ``_A``
(so the second silently shadowed the first and its tests never ran) and all
methods were named ``__snake_case``.  Names below are restored from the
visible call sites and the diffusers test-suite conventions.
NOTE(review): confirm restored method/attribute names against upstream.
"""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)

enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Tiny random pipeline components so the fast test runs on CPU in seconds."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 accumulates more numerical error, hence the looser tolerance.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
40
0
def and_gate(input_1: int, input_2: int) -> int:
    """
    Calculate the logical AND of two (0/1) inputs.

    Returns 1 only when neither input is 0, else 0.

    >>> and_gate(0, 0)
    0
    >>> and_gate(1, 1)
    1
    """
    # The gate fires only when no input equals 0.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Sanity-check and_gate against the full truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
312
"""Check whether a string is an isogram (a word with no repeated letters)."""


def is_isogram(string: str) -> bool:
    """
    Return True if ``string`` contains no repeated letters, case-insensitively.

    Raises:
        ValueError: if ``string`` contains any non-alphabetic character.

    >>> is_isogram("Uncopyrightable")
    True
    >>> is_isogram("allowed")
    False
    """
    # An empty string passes the all() check vacuously and counts as an isogram.
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
40
0